| Column | Type | Observed values |
|---|---|---|
| file_id | string | lengths 5-9 |
| content | string | lengths 100-5.25M |
| local_path | string | lengths 66-70 |
| kaggle_dataset_name | string (nullable) | lengths 3-50 |
| kaggle_dataset_owner | string (nullable) | lengths 3-20 |
| kversion | string (nullable) | lengths 497-763 |
| kversion_datasetsources | string (nullable) | lengths 71-5.46k |
| dataset_versions | string (nullable) | lengths 338-235k |
| datasets | string (nullable) | lengths 334-371 |
| users | string (nullable) | lengths 111-264 |
| script | string | lengths 100-5.25M |
| df_info | string | lengths 0-4.87M |
| has_data_info | bool | 2 classes |
| nb_filenames | int64 | 0-370 |
| retreived_data_description | string | lengths 0-4.44M |
| script_nb_tokens | int64 | 25-663k |
| upvotes | int64 | 0-1.65k |
| tokens_description | int64 | 25-663k |
| tokens_script | int64 | 25-663k |
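Each record below fills these fields in order; the long `content` and `script` fields hold full notebook sources. A minimal sketch for loading the records programmatically, assuming the table has been exported to a local Parquet file (the file name here is only a placeholder):

```python
import pandas as pd

# Placeholder path: point this at wherever the table above was exported.
df = pd.read_parquet("kaggle_code_data.parquet")

# Each row pairs a notebook source ("content"/"script") with metadata about the
# Kaggle dataset(s) it uses ("datasets", "dataset_versions", "users", ...).
print(df.columns.tolist())
print(df[["file_id", "kaggle_dataset_name", "upvotes", "script_nb_tokens"]].head())
```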
129561985
|
<jupyter_start><jupyter_text>Salary Dataset - Simple linear regression
## Dataset Description
Salary dataset in CSV format for simple linear regression. It has also been used in the Machine Learning A to Z course in my series.
## Columns
- #
- YearsExperience
- Salary
Kaggle dataset identifier: salary-dataset-simple-linear-regression
<jupyter_code>import pandas as pd
df = pd.read_csv('salary-dataset-simple-linear-regression/Salary_dataset.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 30 entries, 0 to 29
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 30 non-null int64
1 YearsExperience 30 non-null float64
2 Salary 30 non-null float64
dtypes: float64(2), int64(1)
memory usage: 848.0 bytes
<jupyter_text>Examples:
{
"Unnamed: 0": 0.0,
"YearsExperience": 1.2,
"Salary": 39344.0
}
{
"Unnamed: 0": 1.0,
"YearsExperience": 1.4,
"Salary": 46206.0
}
{
"Unnamed: 0": 2.0,
"YearsExperience": 1.6,
"Salary": 37732.0
}
{
"Unnamed: 0": 3.0,
"YearsExperience": 2.1,
"Salary": 43526.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv(
"/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv"
)
data
print(data.head(5))
print(data.describe())
print(data.info())
# salary_val = data["Salary"].values
# salary_val
from matplotlib import pyplot as plt
x = data.YearsExperience.values
y = data.Salary.values
# data['YearsExperience'] = pd.to_datetime(data['YearsExperience'])
# data['Salary'] = pd.to_datetime(data['Salary'])
from sklearn.model_selection import train_test_split
x = x.reshape(-1, 1)
x
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=100
)
x_train
plt.scatter(x_train, y_train, color="red")
plt.xlabel("Years_of_Experience")
plt.ylabel("Salary")
plt.show()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
lr.score(x_test, y_test) * 100
# ### Draw at least three conclusions from your regression model
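# A minimal sketch (using the lr model fitted above) to ground those conclusions:
# the slope is the expected salary increase per extra year of experience,
# and the intercept is the predicted salary at zero experience.
print("Slope (salary per extra year of experience):", lr.coef_[0])
print("Intercept (baseline salary):", lr.intercept_)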
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.6, random_state=200
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.4, random_state=300
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.5, random_state=400
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561985.ipynb
|
salary-dataset-simple-linear-regression
|
abhishek14398
|
[{"Id": 129561985, "ScriptId": 38522377, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15001180, "CreationDate": "05/14/2023 21:17:11", "VersionNumber": 1.0, "Title": "notebook345ac3d399", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 98.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185746430, "KernelVersionId": 129561985, "SourceDatasetVersionId": 4832081}]
|
[{"Id": 4832081, "DatasetId": 2799910, "DatasourceVersionId": 4895851, "CreatorUserId": 3259703, "LicenseName": "CC0: Public Domain", "CreationDate": "01/10/2023 03:55:40", "VersionNumber": 1.0, "Title": "Salary Dataset - Simple linear regression", "Slug": "salary-dataset-simple-linear-regression", "Subtitle": "Simple Linear Regression Dataset, used in Machine Learning A - Z", "Description": "## Dataset Description\nSalary Dataset in CSV for Simple linear regression. It has also been used in Machine Learning A to Z course of my series.\n\n## Columns\n- #\n- YearsExperience\n- Salary", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2799910, "CreatorUserId": 3259703, "OwnerUserId": 3259703.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4832081.0, "CurrentDatasourceVersionId": 4895851.0, "ForumId": 2834222, "Type": 2, "CreationDate": "01/10/2023 03:55:40", "LastActivityDate": "01/10/2023", "TotalViews": 65295, "TotalDownloads": 13051, "TotalVotes": 139, "TotalKernels": 93}]
|
[{"Id": 3259703, "UserName": "abhishek14398", "DisplayName": "Allena Venkata Sai Aby", "RegisterDate": "05/22/2019", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv(
"/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv"
)
data
print(data.head(5))
print(data.describe())
print(data.info())
# salary_val = data["Salary"].values
# salary_val
from matplotlib import pyplot as plt
x = data.YearsExperience.values
y = data.Salary.values
# data['YearsExperience'] = pd.to_datetime(data['YearsExperience'])
# data['Salary'] = pd.to_datetime(data['Salary'])
from sklearn.model_selection import train_test_split
x = x.reshape(-1, 1)
x
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=100
)
x_train
plt.scatter(x_train, y_train, color="red")
plt.xlabel("Years_of_Experience")
plt.ylabel("Salary")
plt.show()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
lr.score(x_test, y_test) * 100
# ### Draw at least three conclusions from your regression model
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.6, random_state=200
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.4, random_state=300
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.5, random_state=400
)
lr.fit(x_train, y_train)
print("THE SCORE IS ", lr.score(x_test, y_test) * 100)
y_predict = lr.predict(x_test)
plt.scatter(x_train, y_train, color="red")
plt.plot(x_test, y_predict, color="green")
plt.xlabel("yers_of_experiance")
plt.ylabel("Salary")
plt.plot
|
[{"salary-dataset-simple-linear-regression/Salary_dataset.csv": {"column_names": "[\"Unnamed: 0\", \"YearsExperience\", \"Salary\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"YearsExperience\": \"float64\", \"Salary\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 30 entries, 0 to 29\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 30 non-null int64 \n 1 YearsExperience 30 non-null float64\n 2 Salary 30 non-null float64\ndtypes: float64(2), int64(1)\nmemory usage: 848.0 bytes\n", "summary": "{\"Unnamed: 0\": {\"count\": 30.0, \"mean\": 14.5, \"std\": 8.803408430829505, \"min\": 0.0, \"25%\": 7.25, \"50%\": 14.5, \"75%\": 21.75, \"max\": 29.0}, \"YearsExperience\": {\"count\": 30.0, \"mean\": 5.413333333333332, \"std\": 2.8378881576627184, \"min\": 1.2000000000000002, \"25%\": 3.3000000000000003, \"50%\": 4.8, \"75%\": 7.8, \"max\": 10.6}, \"Salary\": {\"count\": 30.0, \"mean\": 76004.0, \"std\": 27414.4297845823, \"min\": 37732.0, \"25%\": 56721.75, \"50%\": 65238.0, \"75%\": 100545.75, \"max\": 122392.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"YearsExperience\":{\"0\":1.2,\"1\":1.4,\"2\":1.6,\"3\":2.1},\"Salary\":{\"0\":39344.0,\"1\":46206.0,\"2\":37732.0,\"3\":43526.0}}"}}]
| true | 1 |
<start_data_description><data_path>salary-dataset-simple-linear-regression/Salary_dataset.csv:
<column_names>
['Unnamed: 0', 'YearsExperience', 'Salary']
<column_types>
{'Unnamed: 0': 'int64', 'YearsExperience': 'float64', 'Salary': 'float64'}
<dataframe_Summary>
{'Unnamed: 0': {'count': 30.0, 'mean': 14.5, 'std': 8.803408430829505, 'min': 0.0, '25%': 7.25, '50%': 14.5, '75%': 21.75, 'max': 29.0}, 'YearsExperience': {'count': 30.0, 'mean': 5.413333333333332, 'std': 2.8378881576627184, 'min': 1.2000000000000002, '25%': 3.3000000000000003, '50%': 4.8, '75%': 7.8, 'max': 10.6}, 'Salary': {'count': 30.0, 'mean': 76004.0, 'std': 27414.4297845823, 'min': 37732.0, '25%': 56721.75, '50%': 65238.0, '75%': 100545.75, 'max': 122392.0}}
<dataframe_info>
RangeIndex: 30 entries, 0 to 29
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 30 non-null int64
1 YearsExperience 30 non-null float64
2 Salary 30 non-null float64
dtypes: float64(2), int64(1)
memory usage: 848.0 bytes
<some_examples>
{'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'YearsExperience': {'0': 1.2, '1': 1.4, '2': 1.6, '3': 2.1}, 'Salary': {'0': 39344.0, '1': 46206.0, '2': 37732.0, '3': 43526.0}}
<end_description>
| 963 | 1 | 1,364 | 963 |
129561767
|
<jupyter_start><jupyter_text>Dummy Marketing and Sales Data
I made this data for my students in 'Data-Driven Marketing' and 'Data Science for Business'. Data contains:
- TV promotion budget (in million)
- Social Media promotion budget (in million)
- Radio promotion budget (in million)
- Influencer: Whether the promotion collaborates with a Mega, Macro, Nano, or Micro influencer
- Sales (in million)
This data can be used for simple tasks:
- Data preprocessing
- Exploratory Data Analysis
- Visualization
- Prediction using Linear Regression and Model Evaluation
Kaggle dataset identifier: dummy-advertising-and-sales-data
<jupyter_code>import pandas as pd
df = pd.read_csv('dummy-advertising-and-sales-data/Dummy Data HSS.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 4572 entries, 0 to 4571
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 TV 4562 non-null float64
1 Radio 4568 non-null float64
2 Social Media 4566 non-null float64
3 Influencer 4572 non-null object
4 Sales 4566 non-null float64
dtypes: float64(4), object(1)
memory usage: 178.7+ KB
<jupyter_text>Examples:
{
"TV": 16,
"Radio": 6.566230788,
"Social Media": 2.907982773,
"Influencer": "Mega",
"Sales": 54.73275715
}
{
"TV": 13,
"Radio": 9.237764567,
"Social Media": 2.409567204,
"Influencer": "Mega",
"Sales": 46.67789698
}
{
"TV": 41,
"Radio": 15.88644602,
"Social Media": 2.913410175,
"Influencer": "Mega",
"Sales": 150.1778288
}
{
"TV": 83,
"Radio": 30.02002826,
"Social Media": 6.922303959,
"Influencer": "Mega",
"Sales": 298.2463398
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Student study Hours
#
df = pd.read_csv("/kaggle/input/student-study-hours/score.csv")
df
hours_val = df.Hours.values
hours_val
scores_val = df.Scores.values
scores_val
from matplotlib import pyplot as plt
x = hours_val # x: independent variable
y = scores_val # y : dependent variable
plt.scatter(x, y, color="purple")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_test, y_test, color="purple")
plt.scatter(x_train, y_train, color="red")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
lr.score(x_test, y_test) * 100
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_predict, color="green")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
# ## Dummy Marketing and Sales Data
#
df = pd.read_csv("/kaggle/input/dummy-advertising-and-sales-data/Dummy Data HSS.csv")
df
radio_val = df.Radio.values
radio_val
sales_val = df.Sales.values
sales_val
x = radio_val # x: independent variable
y = sales_val # y : dependent variable
plt.scatter(x, y, color="purple")
plt.xlabel("radio")
plt.ylabel("Sales")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_test, y_test, color="red")
plt.scatter(x_train, y_train, color="purple")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.plot
from sklearn.impute import SimpleImputer
# The Radio and Sales columns contain missing values, so impute them with the column mean.
imputer = SimpleImputer(strategy="mean")
x_train = imputer.fit_transform(x_train)
x_test = imputer.transform(x_test)
# SimpleImputer expects 2-D input, so reshape the 1-D target before imputing, then flatten it back.
y_train = imputer.fit_transform(y_train.reshape(-1, 1)).ravel()
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
y_test = imputer.transform(y_test.reshape(-1, 1)).flatten()
score = lr.score(x_test, y_test) * 100
score
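# A sketch of an alternative (column name taken from the dataframe above): dropping rows
# with a missing Sales value avoids filling the evaluation labels with the training mean.
print("rows with a non-null Sales value:", df.dropna(subset=["Sales"]).shape[0])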
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_predict, color="green")
plt.xlabel("radio")
plt.ylabel("Sales")
plt.show()
# ## Fuel Consumption 2000-2022
#
df = pd.read_csv("/kaggle/input/fuel-consumption/Fuel_Consumption_2000-2022.csv")
df
fuel_consum_val = df["FUEL CONSUMPTION"].values
fuel_consum_val
emissions_val = df.EMISSIONS.values
emissions_val
from matplotlib import pyplot as plt
x = fuel_consum_val # x: independent variable
y = emissions_val # y : dependent variable
plt.scatter(x, y, color="red")
plt.xlabel("FUEL CONSUMPTION")
plt.ylabel("EMISSIONS")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_test, color="green")
plt.xlabel("FUEL CONSUMPTION")
plt.ylabel("EMISSIONS")
plt.show()
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
lr.score(x_test, y_test) * 100
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561767.ipynb
|
dummy-advertising-and-sales-data
|
harrimansaragih
|
[{"Id": 129561767, "ScriptId": 38523641, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14997001, "CreationDate": "05/14/2023 21:13:56", "VersionNumber": 1.0, "Title": "Linear Regression", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 195.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185746034, "KernelVersionId": 129561767, "SourceDatasetVersionId": 2015190}, {"Id": 185746035, "KernelVersionId": 129561767, "SourceDatasetVersionId": 3965497}, {"Id": 185746036, "KernelVersionId": 129561767, "SourceDatasetVersionId": 4906399}]
|
[{"Id": 2015190, "DatasetId": 1206038, "DatasourceVersionId": 2054779, "CreatorUserId": 5459862, "LicenseName": "CC0: Public Domain", "CreationDate": "03/12/2021 00:09:03", "VersionNumber": 1.0, "Title": "Dummy Marketing and Sales Data", "Slug": "dummy-advertising-and-sales-data", "Subtitle": "Data of TV, Influencer, Radio, and Social Media Ads budget to predict Sales", "Description": "I made this data for my students in 'Data-Driven Marketing' and 'Data Science for Business'. Data contains:\n- TV promotion budget (in million)\n- Social Media promotion budget (in million)\n- Radio promotion budget (in million)\n- Influencer: Whether the promotion collaborate with Mega, Macro, Nano, Micro influencer\n- Sales (in million)\n\nThis data can be used for simple tasks:\n- Data preprocessing\n- Exploratory Data Analysis\n- Visualization\n- Prediction using Linear Regression and Model Evaluation", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1206038, "CreatorUserId": 5459862, "OwnerUserId": 5459862.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2015190.0, "CurrentDatasourceVersionId": 2054779.0, "ForumId": 1224032, "Type": 2, "CreationDate": "03/12/2021 00:09:03", "LastActivityDate": "03/12/2021", "TotalViews": 65390, "TotalDownloads": 8681, "TotalVotes": 103, "TotalKernels": 35}]
|
[{"Id": 5459862, "UserName": "harrimansaragih", "DisplayName": "Harriman Samuel Saragih", "RegisterDate": "07/13/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Student study Hours
#
df = pd.read_csv("/kaggle/input/student-study-hours/score.csv")
df
hours_val = df.Hours.values
hours_val
scores_val = df.Scores.values
scores_val
from matplotlib import pyplot as plt
x = hours_val # x: independent variable
y = scores_val # y : dependent variable
plt.scatter(x, y, color="purple")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_test, y_test, color="purple")
plt.scatter(x_train, y_train, color="red")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
lr.score(x_test, y_test) * 100
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_predict, color="green")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.show()
# ## Dummy Marketing and Sales Data
#
df = pd.read_csv("/kaggle/input/dummy-advertising-and-sales-data/Dummy Data HSS.csv")
df
radio_val = df.Radio.values
radio_val
sales_val = df.Sales.values
sales_val
x = radio_val # x: independent variable
y = sales_val # y : dependent variable
plt.scatter(x, y, color="purple")
plt.xlabel("radio")
plt.ylabel("Sales")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_test, y_test, color="red")
plt.scatter(x_train, y_train, color="purple")
plt.xlabel("Hours")
plt.ylabel("Scores")
plt.plot
from sklearn.impute import SimpleImputer
# The Radio and Sales columns contain missing values, so impute them with the column mean.
imputer = SimpleImputer(strategy="mean")
x_train = imputer.fit_transform(x_train)
x_test = imputer.transform(x_test)
# SimpleImputer expects 2-D input, so reshape the 1-D target before imputing, then flatten it back.
y_train = imputer.fit_transform(y_train.reshape(-1, 1)).ravel()
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
y_test = imputer.transform(y_test.reshape(-1, 1)).flatten()
score = lr.score(x_test, y_test) * 100
score
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_predict, color="green")
plt.xlabel("radio")
plt.ylabel("Sales")
plt.show()
# ## Fuel Consumption 2000-2022
#
df = pd.read_csv("/kaggle/input/fuel-consumption/Fuel_Consumption_2000-2022.csv")
df
fuel_consum_val = df["FUEL CONSUMPTION"].values
fuel_consum_val
emissions_val = df.EMISSIONS.values
emissions_val
from matplotlib import pyplot as plt
x = fuel_consum_val # x: independent variable
y = emissions_val # y : dependent variable
plt.scatter(x, y, color="red")
plt.xlabel("FUEL CONSUMPTION")
plt.ylabel("EMISSIONS")
plt.show()
x = x.reshape(-1, 1)
x, len(x)
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.8, random_state=600
)
x_train, len(x_train)
x_test, len(x_test)
plt.scatter(x_train, y_train, color="red")
plt.scatter(x_test, y_test, color="green")
plt.xlabel("FUEL CONSUMPTION")
plt.ylabel("EMISSIONS")
plt.show()
lr = LinearRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
y_predict
lr.score(x_test, y_test) * 100
|
[{"dummy-advertising-and-sales-data/Dummy Data HSS.csv": {"column_names": "[\"TV\", \"Radio\", \"Social Media\", \"Influencer\", \"Sales\"]", "column_data_types": "{\"TV\": \"float64\", \"Radio\": \"float64\", \"Social Media\": \"float64\", \"Influencer\": \"object\", \"Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4572 entries, 0 to 4571\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 TV 4562 non-null float64\n 1 Radio 4568 non-null float64\n 2 Social Media 4566 non-null float64\n 3 Influencer 4572 non-null object \n 4 Sales 4566 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 178.7+ KB\n", "summary": "{\"TV\": {\"count\": 4562.0, \"mean\": 54.06685664182376, \"std\": 26.125053891841468, \"min\": 10.0, \"25%\": 32.0, \"50%\": 53.0, \"75%\": 77.0, \"max\": 100.0}, \"Radio\": {\"count\": 4568.0, \"mean\": 18.160355892662654, \"std\": 9.676958456095429, \"min\": 0.000683948, \"25%\": 10.5259572775, \"50%\": 17.85951293, \"75%\": 25.649729847499998, \"max\": 48.87116125}, \"Social Media\": {\"count\": 4566.0, \"mean\": 3.323956161385458, \"std\": 2.212670263921655, \"min\": 3.13e-05, \"25%\": 1.52784868575, \"50%\": 3.055565435, \"75%\": 4.807557994, \"max\": 13.98166208}, \"Sales\": {\"count\": 4566.0, \"mean\": 192.46660210662066, \"std\": 93.13309168784247, \"min\": 31.19940869, \"25%\": 112.322882475, \"50%\": 189.23117235, \"75%\": 272.50792167500003, \"max\": 364.0797515}}", "examples": "{\"TV\":{\"0\":16.0,\"1\":13.0,\"2\":41.0,\"3\":83.0},\"Radio\":{\"0\":6.566230788,\"1\":9.237764567,\"2\":15.88644602,\"3\":30.02002826},\"Social Media\":{\"0\":2.907982773,\"1\":2.409567204,\"2\":2.913410175,\"3\":6.922303959},\"Influencer\":{\"0\":\"Mega\",\"1\":\"Mega\",\"2\":\"Mega\",\"3\":\"Mega\"},\"Sales\":{\"0\":54.73275715,\"1\":46.67789698,\"2\":150.1778288,\"3\":298.2463398}}"}}]
| true | 3 |
<start_data_description><data_path>dummy-advertising-and-sales-data/Dummy Data HSS.csv:
<column_names>
['TV', 'Radio', 'Social Media', 'Influencer', 'Sales']
<column_types>
{'TV': 'float64', 'Radio': 'float64', 'Social Media': 'float64', 'Influencer': 'object', 'Sales': 'float64'}
<dataframe_Summary>
{'TV': {'count': 4562.0, 'mean': 54.06685664182376, 'std': 26.125053891841468, 'min': 10.0, '25%': 32.0, '50%': 53.0, '75%': 77.0, 'max': 100.0}, 'Radio': {'count': 4568.0, 'mean': 18.160355892662654, 'std': 9.676958456095429, 'min': 0.000683948, '25%': 10.5259572775, '50%': 17.85951293, '75%': 25.649729847499998, 'max': 48.87116125}, 'Social Media': {'count': 4566.0, 'mean': 3.323956161385458, 'std': 2.212670263921655, 'min': 3.13e-05, '25%': 1.52784868575, '50%': 3.055565435, '75%': 4.807557994, 'max': 13.98166208}, 'Sales': {'count': 4566.0, 'mean': 192.46660210662066, 'std': 93.13309168784247, 'min': 31.19940869, '25%': 112.322882475, '50%': 189.23117235, '75%': 272.50792167500003, 'max': 364.0797515}}
<dataframe_info>
RangeIndex: 4572 entries, 0 to 4571
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 TV 4562 non-null float64
1 Radio 4568 non-null float64
2 Social Media 4566 non-null float64
3 Influencer 4572 non-null object
4 Sales 4566 non-null float64
dtypes: float64(4), object(1)
memory usage: 178.7+ KB
<some_examples>
{'TV': {'0': 16.0, '1': 13.0, '2': 41.0, '3': 83.0}, 'Radio': {'0': 6.566230788, '1': 9.237764567, '2': 15.88644602, '3': 30.02002826}, 'Social Media': {'0': 2.907982773, '1': 2.409567204, '2': 2.913410175, '3': 6.922303959}, 'Influencer': {'0': 'Mega', '1': 'Mega', '2': 'Mega', '3': 'Mega'}, 'Sales': {'0': 54.73275715, '1': 46.67789698, '2': 150.1778288, '3': 298.2463398}}
<end_description>
| 1,425 | 0 | 2,077 | 1,425 |
129561735
|
<jupyter_start><jupyter_text>Dubizzle used car sales data
## Dataset Description
Dubizzle is the UAE's (a Middle East country) favorite marketplace to buy, sell and find anything. In this dataset I scraped almost all data from Dubizzle related to automobile selling. This data can be used to find interesting facts and correlations between different brands, the resale value of a specific car relative to its year, and more. Enjoy and explore.
## Summary
- There are 20 columns and 9170 rows
- Scraped date 12/05/2022
## Column Description
1. *title* - Vehicle name with model details
2. *price_in_aed* - Vehicle price in United Arab Emirates dirham (AED)
3. *kilometer* - How many kilometers the vehicle has travelled
4. *body_condition* - Body condition of vehicle
5. *mechanical_condition* - Mechanical condition of vehicle
6. *seller_type* - Type of seller ( Dealer, Owner, Other)
7. *body_type* - Body type ( SUV, Sedan, Other)
8. *no_of_cylinder* - Number of cylinder
9. *transmission_type* - Vehicle transmission type ( Automatic Transmission, Manual Transmission )
10. *regional_spec* - Regional Specification of vehicle
11. *horsepower* - Horsepower
12. *fuel_type* - Fuel Type
13. *steering_side* - Steering side of the vehicle
14. *year* - Vehicle model year
15. *color* - Vehicle color
16. *emirates* - Emirates is like state
17. *motor_trim* - Motor trim type
18. *company* - Vehicle manufacture company name
19. *model* - Vehicle model
20. *date_posted* - Date of ad posted
Kaggle dataset identifier: dubizzle-used-car-sale-data
<jupyter_script>import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from datasist.structdata import detect_outliers
df = pd.read_csv(
"/kaggle/input/dubizzle-used-car-sale-data/data.csv",
na_values=["NoneUnknown", "Unknown"],
)
df.head()
df.shape
# ## Check features, data types and missing values
df.info()
# ## Check for duplicated data
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
df.shape
df.describe()
# ## Check for Null values
(df.isna().mean() * 100)
# Percentage of Null values in the entire dataset
(df.isna().mean() * 100).sum()
# ## Handling Null values
from sklearn.impute import KNNImputer
imputer = KNNImputer()
df_numeric = df.select_dtypes(include=np.number)
df_catt = df.select_dtypes(include="object_")
df_numeric_Arr = imputer.fit_transform(df_numeric)
df_numeric_Arr
len(df_numeric_Arr)
df_numeric = pd.DataFrame(df_numeric_Arr, columns=imputer.get_feature_names_out())
df_numeric.shape
df_numeric.isna().sum()
df_numeric.shape
df_catt.isna().mean() * 100
df = pd.concat([df_catt.reset_index(), df_numeric.reset_index()], axis=1)
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="most_frequent")
df["horsepower"] = imputer.fit_transform(df[["horsepower"]])
df["no_of_cylinders"] = imputer.fit_transform(df[["no_of_cylinders"]])
df["motors_trim"] = imputer.fit_transform(df[["motors_trim"]])
df.isna().mean() * 100
# df.dropna(inplace=True)
# lapel=df['price_in_aed']
# df=df.drop('price_in_aed',axis=1)
# # EDA
# ## univariate analysis
# ## Handle price_in_aed >> Convert it to Int
# ### I have a problem converting price_in_aed to float because of the special character ','
# df['price_in_aed'].apply(lambda x: float(x))
##df['price_in_aed'] = df['price_in_aed'].astype(float)
def Price_in_aed_To_Int(price):
num = ""
price_List = price.split(",")
for i in price_List:
num += i
return int(num)
df["price_in_aed"] = df["price_in_aed"].apply(Price_in_aed_To_Int)
df["price_in_aed"]
### First I will handle features that have many values (not unique) & that have continuous values like kilometers
df.title.value_counts()
# title contains model and company, and I have both as individual features, so I will drop title
df.drop("title", axis=1, inplace=True)
# high_variance_columns=['body_condition', 'mechanical_condition', 'steering_side', 'fuel_type', 'transmission_type', 'emirate','motors_trim','model','date_posted']
# high_variance_columns
# ## There are a lot of totally imbalanced features, so I have to remove them
# ##### Candidates: body_condition, mechanical_condition, steering_side, fuel_type, transmission_type, emirate, motors_trim, model, date_posted
# ##### Dropped here: body_condition, mechanical_condition, steering_side, fuel_type, transmission_type
inbalanced_columns = [
"body_condition",
"mechanical_condition",
"steering_side",
"fuel_type",
"transmission_type",
]
for x in inbalanced_columns:
sns.countplot(data=df, y=df[x])
    plt.figure()  # start a new figure for the next count plot
df.drop(inbalanced_columns, axis=1, inplace=True)
df.drop("index", inplace=True, axis=1)
df["color"].value_counts()
sns.countplot(y=df["color"])
df.drop(
df[
(df["color"] == "Purple") | (df["color"] == "Teal") | (df["color"] == "Tan")
].index,
inplace=True,
axis=0,
)
sns.countplot(y=df["color"])
df.shape
# ## I need to do some bivariate analysis between price_in_aed and kilometers to show whether outliers affect the correlation between them or not
# before removing outliers corr=-0.2
df.corr()
# ### before removing outliers corr=-0.2 & 0.25
# df['kilometers'] = df['kilometers'].astype(int)
sns.kdeplot(df["kilometers"])
sns.boxenplot(df["kilometers"])
df[df["kilometers"] > 300000].index
df.drop(df[df["kilometers"] > 300000].index, inplace=True, axis=0)
df.shape
sns.kdeplot(df["kilometers"])
sns.displot(data=df, x="kilometers")
sns.boxplot(df["kilometers"])
# #### I conclude from here that there are many cars that have never been used, because kilometers=0
df.describe()
df["year"] = df["year"].astype(int)
df.info()
print(list(df["year"].value_counts()))
sns.displot(df["year"])
sns.boxplot(df["year"])
df[df["year"] <= 2005]
df.drop(df[df["year"] <= 2005].index, inplace=True, axis=0)
sns.displot(df["year"])
sns.boxplot(df["year"])
df.corr()
# ### after removing outliers corr=-0.4 &0.28
# ### so outliers have no meaning here
# sns.countplot(df['year'])
# ### Handle body_type
sns.countplot(data=df, y="body_type")
def Body_Type_(body_type):
if body_type == "SUV" or body_type == "Sedan" or body_type == "Coupe":
return body_type
else:
return "Other"
df["body_type"] = df["body_type"].apply(Body_Type_)
sns.countplot(df["body_type"])
# ### Handle no_of_cylinders
sns.countplot(data=df, x="no_of_cylinders")
# #### Remove 3, 10, 5, None
not_important_no_of_cylinders_idx = df[
(df["no_of_cylinders"] == "3")
| (df["no_of_cylinders"] == "10")
| (df["no_of_cylinders"] == "5")
| (df["no_of_cylinders"] == "None")
].index
df.drop(not_important_no_of_cylinders_idx, axis=0, inplace=True)
df["no_of_cylinders"] = df["no_of_cylinders"].astype(int)
sns.countplot(data=df, x="no_of_cylinders")
# print(list(df['motors_trim'].unique()))
df.columns
col = [
"body_condition",
"mechanical_condition",
"seller_type",
"body_type",
"no_of_cylinders",
"transmission_type",
"regional_specs",
"horsepower",
"fuel_type",
"steering_side",
"color",
"emirate",
]
df.info()
for x in df.columns:
sns.countplot(data=df, y=df[x])
    plt.figure()  # start a new figure for the next count plot
df["company"].value_counts().head(23).index
df["company"].value_counts().sort_values().head(29)
other_companies = df["company"].value_counts().sort_values().head(29).index
other_companies = list(other_companies)
def Handel_Companies(company):
for c in range(len(other_companies)):
if company == other_companies[c]:
return "other"
return company
df["company"] = df["company"].apply(Handel_Companies)
sns.countplot(data=df, y=df["company"])
df["company"].value_counts()
# df.groupby('company').sum()
df
df.info()
df
df["horsepower"].value_counts()
def Horsepower(horsepower):
if (
horsepower == "700 - 800 HP"
or horsepower == "800 - 900 HP"
or horsepower == "900+ HP"
):
return "More than 700"
else:
return horsepower
df["horsepower"] = df["horsepower"].apply(Horsepower)
df["horsepower"].value_counts()
for col in df.columns:
sns.countplot(data=df, y=df[col])
plt.figure()
sns.displot(data=df, x=df["price_in_aed"])
sns.boxenplot(data=df, x=df["price_in_aed"])
df[df["price_in_aed"] > 1500000].index
x = df.drop(df[df["price_in_aed"] > 1500000].index)
sns.displot(data=x, x=x["price_in_aed"])
sns.displot(np.log(df["price_in_aed"]))
df.describe()
df.corr()
x.corr()
# df.drop('date_posted',inplace=True,axis=1)
# ## Here we can see that removing outliers from the target increases the correlation, so keeping the outliers here makes no sense
df = x
df
df.describe()
df.head(1)
# # Bivariate Analysis
# ## Relation between the numeric features
sns.heatmap(df.corr(), annot=True)
sns.scatterplot(x=df["price_in_aed"], y=df["year"])
sns.scatterplot(y=df["price_in_aed"], x=df["kilometers"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["year"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["kilometers"])
sns.scatterplot(y=df["year"], x=df["kilometers"])
df.corr()
df.drop("emirate", inplace=True, axis=1)
df["motors_trim"].value_counts()
# df.drop('motors_trim',inplace=True,axis=1)
for col in df.columns:
sns.countplot(data=df, y=df[col])
plt.figure()
df.describe()
df.head(2)
df.reset_index()
df.info()
df.reset_index(inplace=True, drop=True)
# # Let's Answer Business Questions (a sketch of the shared query pattern follows this list)
# ## 1- Top 10 companies by number of sales
# ## 2- For the top 10 companies by sales, what are the 5 most common models
# ## 3- Top 10 highest-price companies regardless of other factors
# ## 4- Top 10 highest-price companies in a category of 200-300 horsepower
# ## 5- Top 10 highest-price companies over the period 2010-2015
# ## 6- Best selling body_type at GCC Specs (regional_specs)
# ## 7- Top 10 models by number of sales
# ## 8- Top 10 highest-price models and their company
# ## 9- Top 10 highest-price models and their company in a category of 200-500 horsepower
# ## 10- Top 10 models that have driven 0 kilometers and their data
# ## 11- Most frequent regional_specs among cars that have driven 0 kilometers
# ## 12- According to the Owner seller type, what are the 2 best-selling body_types
# ## 13- According to the Owner seller type, what are the top 10 best-selling models and their data
# ## 14- According to the Dealer seller type, what are the top 10 best-selling models and their data
# ## 15- Top 5 highest-price colors among cars under the average kilometers
# ## 16- Top 10 motors_trim by number of sales
# ## 17- For each body_type, for each seller_type, which has the most sales
# ## 18- For each body_type, for each no_of_cylinders, which has the most sales
# ## 19- For each seller_type, for each no_of_cylinders, which has the most sales
# ## 20- For each seller_type, for each horsepower, which has the most sales
# ## 21- For each body_type, for each horsepower, which has the most sales
# ## 22- For each horsepower, for each no_of_cylinders, which has the most sales
# ## 23- For each seller_type, how many kilometers driven in total
# ## 24- For each seller_type, how many kilometers driven on average
# ## 25- For each body_type, how many kilometers driven in total
# ## 26- For each body_type, how many kilometers driven on average
# ## 27- For each seller_type, for each body_type, how many kilometers driven in total
# ## 28- For each seller_type, for each body_type, how many kilometers driven on average
# ## 29- For each body_type, for each no_of_cylinders, calculate the sum of kilometers driven
# ## 30- For each body_type, for each no_of_cylinders, calculate the average of kilometers driven
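# Most of the questions above follow one pandas pattern (a sketch using the columns
# already in df): filter -> groupby -> aggregate -> sort_values -> head(n).
# For example, total price by company, highest first, top 10:
df.groupby("company")["price_in_aed"].sum().sort_values(ascending=False).head(10)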
# ## 1- Top 10 companies by number of sales
df["company"].value_counts().head(10)
most_10_companies_idx = df["company"].value_counts().head(10).index
most_10_companies_df = df[df["company"].isin(most_10_companies_idx)]
most_10_companies_df
sns.countplot(
data=most_10_companies_df,
y=most_10_companies_df["company"],
order=most_10_companies_idx,
)
# ## 2- For the top 10 companies by sales, what are the 5 most common models
#
most_10_companies_df["model"].value_counts().head(5)
most_5_models_in_most_10_companies_have_seles_idx = (
most_10_companies_df["model"].value_counts().head(5).index
)
most_5_models_in_most_10_companies_have_seles_df = most_10_companies_df[
most_10_companies_df["model"].isin(
most_5_models_in_most_10_companies_have_seles_idx
)
]
most_5_models_in_most_10_companies_have_seles_df
sns.countplot(
data=most_5_models_in_most_10_companies_have_seles_df,
x=most_5_models_in_most_10_companies_have_seles_df["model"],
order=most_5_models_in_most_10_companies_have_seles_idx,
)
# ## 3- Top 10 highest-price companies regardless of other factors
#
df.groupby("company").sum().sort_values(by="price_in_aed", ascending=False).head(10)[
    ["price_in_aed"]
]
# ## 4- most 10 high price companies in a category of 200-300 horsepower
#
df[df["horsepower"] == "200 - 300 HP"].groupby("company").sum().sort_values(
by="price_in_aed"
).head(10)[["price_in_aed"]]
# ## 5- most 10 high price companies at duration of 2010-2015
#
df[(df["year"] < 2015) & (df["year"] > 2010)].groupby("company").sum().sort_values(
by="price_in_aed"
).head(10)[["price_in_aed"]]
# ## 6- Best selling body_type at GCC Specs (regional_specs)
#
df[df["regional_specs"] == "GCC Specs"]["body_type"].value_counts().to_frame()
# ## 7- Top 10 models by number of sales
#
df["model"].value_counts().head(10).to_frame()
most_10_models_have_seles_idx = df["model"].value_counts().head(10).index
most_10_models_have_seles_df = df[df["model"].isin(most_10_models_have_seles_idx)]
most_10_models_have_seles_df.head()
sns.countplot(
data=most_10_models_have_seles_df,
y=most_10_models_have_seles_df["model"],
order=most_10_models_have_seles_idx,
)
# ## 8- Top 10 highest-price models and their company
#
df.groupby("model").sum().sort_values(by="price_in_aed", ascending=False).head(10)[
    ["price_in_aed"]
]
most_10_high_price_models_idx = (
    df.groupby("model")
    .sum()
    .sort_values(by="price_in_aed", ascending=False)
    .head(10)[["price_in_aed"]]
    .index
)
most_10_high_price_models_df = df[df["model"].isin(most_10_high_price_models_idx)]
most_10_high_price_models_df[["model", "company", "price_in_aed"]]
# ## 9- Top 10 highest-price models and their company in a category of 200-500 horsepower
#
df_200_500_HP = df[
(df["horsepower"] == "200 - 300 HP") | (df["horsepower"] == "400 - 500 HP")
]
df_200_500_HP.groupby("model").sum().sort_values(
    by="price_in_aed", ascending=False
).head(10)[["price_in_aed"]]
most_10_high_price_models_idx = (
    df_200_500_HP.groupby("model")
    .sum()
    .sort_values(by="price_in_aed", ascending=False)
    .head(10)[["price_in_aed"]]
    .index
)
most_10_high_price_models_df = df_200_500_HP[
df_200_500_HP["model"].isin(most_10_high_price_models_idx)
]
most_10_high_price_models_df[["model", "company", "price_in_aed"]]
# ## 10- Top 10 models that have driven 0 kilometers
#
most_10_models_that_spend_0_km_idx = (
df[df["kilometers"] == 0.0]["model"].value_counts().head(10).index
)
df[df["kilometers"] == 0.0]["model"].value_counts().head(10)
most_10_models_that_spend_0_km_df = df[
df["model"].isin(most_10_models_that_spend_0_km_idx)
]
most_10_models_that_spend_0_km_df
sns.countplot(
data=most_10_models_that_spend_0_km_df,
y=most_10_models_that_spend_0_km_df["model"],
order=most_10_models_that_spend_0_km_idx,
)
# ## 11- Most frequent regional_specs among cars that have driven 0 kilometers
#
df[df["kilometers"] == 0.0]["regional_specs"].value_counts().to_frame()
# ## 12- According to the Owner seller type, what are the 2 best-selling body_types
#
df[df["seller_type"] == "Owner"]["body_type"].value_counts().head(2)
# ## 13- According to the Owner seller type, what are the top 10 best-selling models and their data
#
df[df["seller_type"] == "Owner"]["model"].value_counts().head(10)
owner_most_10_models_seles_idx = (
df[df["seller_type"] == "Owner"]["model"].value_counts().head(10).index
)
owner_most_10_models_seles_df = df[df["model"].isin(owner_most_10_models_seles_idx)]
owner_most_10_models_seles_df
# ## 14- According to the Dealer seller type, what are the top 10 best-selling models and their data
#
df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10)
Dealer_most_10_models_seles_idx = (
df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10).index
)
Dealer_most_10_models_seles_df = df[df["model"].isin(Dealer_most_10_models_seles_idx)]
Dealer_most_10_models_seles_df
# ## 15- Top 5 highest-price colors among cars under the average kilometers
under_avg_km_df = df[df["kilometers"] < df["kilometers"].mean()]
under_avg_km_df.groupby("color").sum().sort_values(
    by="price_in_aed", ascending=False
).head(5)[["price_in_aed"]]
# ## 16- Top 10 motors_trim by number of sales
df["motors_trim"].value_counts().head(10)
most_10_motors_trim_have_seles_idx = df["motors_trim"].value_counts().head(10).index
most_10_motors_trim_have_seles_df = df[
df["motors_trim"].isin(most_10_motors_trim_have_seles_idx)
]
most_10_motors_trim_have_seles_df
sns.countplot(
data=most_10_motors_trim_have_seles_df,
y=most_10_motors_trim_have_seles_df["motors_trim"],
order=most_10_motors_trim_have_seles_idx,
)
# ## 17- For each body_type for each seller_type which has most sales
#
df["dumy"] = 1
df.pivot_table(columns="seller_type", index="body_type", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="seller_type", hue="body_type")
# ## 18- For each body_type for each no_of_cylinders which has most sales
df.pivot_table(columns="body_type", index="no_of_cylinders", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="body_type", hue="no_of_cylinders")
sns.countplot(df["no_of_cylinders"])
# ## 19- For each seller_type for each no_of_cylinders which has most sales
df.pivot_table(
columns="seller_type", index="no_of_cylinders", values="dumy", aggfunc=sum
)
sns.countplot(data=df, x="seller_type", hue="no_of_cylinders")
# ## 20- For each seller_type for each horsepower which has most sales
#
df.pivot_table(columns="seller_type", index="horsepower", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="seller_type", hue="horsepower")
sns.countplot(y=df["horsepower"])
# ## 21- For each body_type, for each horsepower, which has the most sales
#
df.pivot_table(columns="body_type", index="horsepower", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="body_type", hue="horsepower")
sns.countplot(df["body_type"])
# ## 22- For each horsepower, for each no_of_cylinders, which has the most sales
#
df.pivot_table(
columns="horsepower", index="no_of_cylinders", values="dumy", aggfunc=sum
)
sns.countplot(data=df, y="horsepower", hue="no_of_cylinders")
sns.countplot(y=df["no_of_cylinders"])
# ## 23- For each seller_type how much kilometers driven
df["seller_type"].value_counts().index
sns.barplot(data=df, x="seller_type", y="kilometers", estimator=sum)
df.groupby("seller_type").sum()[["kilometers"]]
# ## 24- For each seller_type how much kilometers driven in average
sns.barplot(data=df, x="seller_type", y="kilometers")
df.groupby("seller_type").mean()[["kilometers"]]
# conclusion here is that owner-type sellers sell cars that have driven more kilometers
# ## 25- For each body_type how much kilometers driven
sns.barplot(data=df, x="body_type", y="kilometers", estimator=sum)
df.groupby("body_type").sum()[["kilometers"]]
# ## 26- For each body_type how much kilometers driven in average
sns.barplot(data=df, x="body_type", y="kilometers")
df.groupby("body_type").mean()[["kilometers"]]
df
# ## 27- For each seller_type, for each body_type, how many kilometers driven in total
sns.barplot(
data=df, x="seller_type", y="kilometers", hue=df["body_type"], estimator=sum
)
df.pivot_table(
columns="seller_type", index="body_type", values="kilometers", aggfunc=sum
)
# ## 28- For each seller_type for each body_type how much kilometers driven in average
sns.barplot(data=df, x="seller_type", y="kilometers", hue=df["body_type"])
df.pivot_table(columns="seller_type", index="body_type", values="kilometers")
# ## 29- For each body_type for each no_of_cylinders calculate the sum of kilometers driven
#
sns.barplot(
data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"], estimator=sum
)
df.pivot_table(
columns="body_type", index="no_of_cylinders", values="kilometers", aggfunc=sum
)
# ## 30- For each body_type for each no_of_cylinders calculate the sum of kilometers driven in average
#
sns.barplot(data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"])
df.pivot_table(columns="body_type", index="no_of_cylinders", values="kilometers")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561735.ipynb
|
dubizzle-used-car-sale-data
|
alihassankp
|
[{"Id": 129561735, "ScriptId": 38523534, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9201390, "CreationDate": "05/14/2023 21:13:23", "VersionNumber": 2.0, "Title": "dubizzle-used-car-Analysis", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 661.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 659.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185745985, "KernelVersionId": 129561735, "SourceDatasetVersionId": 4471258}]
|
[{"Id": 4471258, "DatasetId": 2616968, "DatasourceVersionId": 4531310, "CreatorUserId": 2304566, "LicenseName": "CC0: Public Domain", "CreationDate": "11/08/2022 16:09:01", "VersionNumber": 1.0, "Title": "Dubizzle used car sales data", "Slug": "dubizzle-used-car-sale-data", "Subtitle": "Dubizzle used car sales data it can be used for recommendation system", "Description": "## Dataset Description \n\nDubizzle is the UAE'S (Middle east country) favorite marketplace to buy, sell and find anything. In this dataset I scrapped almost all data from Dubizzle related to automobile selling. This data can be used for finding interesting fact and correlation between different kind brands, resell value of a specific car related to year and more. Enjoy and explore.\n\n## Summary\n- There are 20 columns and 9170 rows\n- Scrapped date 12/05/2022\n\n## Column Description\n1. *title* - Vehicle name with model details\n2. *price_in_aed* - Vehicle price in united arab emirates dhirham\n3. *kilometer* - How many kilometer the vehicle travelled \n4. *body_condition* - Body condition of vehicle \n5. *mechanical_condition* - Mechanical condition of vehicle\n6. *seller_type* - Type of seller ( Dealer, Owner, Other)\n7. *body_type* - Body type ( SUV, Sedan, Other)\n8. *no_of_cylinder* - Number of cylinder \n9. *transmission_type* - Vehicle transmission type ( Automatic Transmission, Manual Transmission )\n10. *regional_spec* - Regional Specification of vehicle\n11. *horsepower* - Horsepower\n12. *fuel_type* - Fuel Type \n13. *steering_side* - Steering side of the vehicle\n14. *year* - Vehicle model year\n15. *color* - Vehicle color\n16. *emirates* - Emirates is like state\n17. *motor_trim* - Motor trim type\n18. *company* - Vehicle manufacture company name\n19. *model* - Vehicle model\n20. *date_posted* - Date of ad posted", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2616968, "CreatorUserId": 2304566, "OwnerUserId": 2304566.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4471258.0, "CurrentDatasourceVersionId": 4531310.0, "ForumId": 2647821, "Type": 2, "CreationDate": "11/08/2022 16:09:01", "LastActivityDate": "11/08/2022", "TotalViews": 10323, "TotalDownloads": 1711, "TotalVotes": 33, "TotalKernels": 8}]
|
[{"Id": 2304566, "UserName": "alihassankp", "DisplayName": "Ali Hassan", "RegisterDate": "09/30/2018", "PerformanceTier": 1}]
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from datasist.structdata import detect_outliers
df = pd.read_csv(
"/kaggle/input/dubizzle-used-car-sale-data/data.csv",
na_values=["NoneUnknown", "Unknown"],
)
df.head()
df.shape
# ## Check features, data types and missing values
df.info()
# ## Check for duplicated data
df.duplicated().sum()
df.drop_duplicates(inplace=True)
df.duplicated().sum()
df.shape
df.describe()
# ## Check for Null values
(df.isna().mean() * 100)
# Percentage of Null values in the entire dataset
(df.isna().mean() * 100).sum()
# ## Handling Null values
from sklearn.impute import KNNImputer
imputer = KNNImputer()
df_numeric = df.select_dtypes(include=np.number)
df_catt = df.select_dtypes(include="object_")
df_numeric_Arr = imputer.fit_transform(df_numeric)
df_numeric_Arr
len(df_numeric_Arr)
df_numeric = pd.DataFrame(df_numeric_Arr, columns=imputer.get_feature_names_out())
df_numeric.shape
df_numeric.isna().sum()
df_numeric.shape
df_catt.isna().mean() * 100
df = pd.concat([df_catt.reset_index(), df_numeric.reset_index()], axis=1)
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="most_frequent")
df["horsepower"] = imputer.fit_transform(df[["horsepower"]])
df["no_of_cylinders"] = imputer.fit_transform(df[["no_of_cylinders"]])
df["motors_trim"] = imputer.fit_transform(df[["motors_trim"]])
df.isna().mean() * 100
# df.dropna(inplace=True)
# lapel=df['price_in_aed']
# df=df.drop('price_in_aed',axis=1)
# # EDA
# ## univariate analysis
# ## Handle price_in_aed >> Convert it to Int
# ### I have a problem converting price_in_aed to float because of the special character ','
# df['price_in_aed'].apply(lambda x: float(x))
##df['price_in_aed'] = df['price_in_aed'].astype(float)
def Price_in_aed_To_Int(price):
num = ""
price_List = price.split(",")
for i in price_List:
num += i
return int(num)
df["price_in_aed"] = df["price_in_aed"].apply(Price_in_aed_To_Int)
df["price_in_aed"]
### First I will handle features that have many values (not unique) & that have continuous values like kilometers
df.title.value_counts()
# title contains model and company, and I have both as individual features, so I will drop title
df.drop("title", axis=1, inplace=True)
# high_variance_columns=['body_condition', 'mechanical_condition', 'steering_side', 'fuel_type', 'transmission_type', 'emirate','motors_trim','model','date_posted']
# high_variance_columns
# ## There are a lot of totally imbalanced features, so I have to remove them
# ##### Candidates: body_condition, mechanical_condition, steering_side, fuel_type, transmission_type, emirate, motors_trim, model, date_posted
# ##### Dropped here: body_condition, mechanical_condition, steering_side, fuel_type, transmission_type
inbalanced_columns = [
"body_condition",
"mechanical_condition",
"steering_side",
"fuel_type",
"transmission_type",
]
for x in inbalanced_columns:
sns.countplot(data=df, y=df[x])
    plt.figure()  # start a new figure for the next count plot
df.drop(inbalanced_columns, axis=1, inplace=True)
df.drop("index", inplace=True, axis=1)
df["color"].value_counts()
sns.countplot(y=df["color"])
df.drop(
df[
(df["color"] == "Purple") | (df["color"] == "Teal") | (df["color"] == "Tan")
].index,
inplace=True,
axis=0,
)
sns.countplot(y=df["color"])
df.shape
# ## I need to do some bivariate analysis between price_in_aed and kilometers to show whether outliers affect the correlation between them or not
# before removing outliers corr=-0.2
df.corr()
# ### before removing outliers corr=-0.2 & 0.25
# df['kilometers'] = df['kilometers'].astype(int)
sns.kdeplot(df["kilometers"])
sns.boxenplot(df["kilometers"])
df[df["kilometers"] > 300000].index
df.drop(df[df["kilometers"] > 300000].index, inplace=True, axis=0)
df.shape
sns.kdeplot(df["kilometers"])
sns.displot(data=df, x="kilometers")
sns.boxplot(df["kilometers"])
# #### I conclude from here that there are many cars that have never been used, because kilometers=0
df.describe()
df["year"] = df["year"].astype(int)
df.info()
print(list(df["year"].value_counts()))
sns.displot(df["year"])
sns.boxplot(df["year"])
df[df["year"] <= 2005]
df.drop(df[df["year"] <= 2005].index, inplace=True, axis=0)
sns.displot(df["year"])
sns.boxplot(df["year"])
df.corr()
# ### after removing outliers corr=-0.4 &0.28
# ### so outliers have no meaning here
# sns.countplot(df['year'])
# ### Handle body_type
sns.countplot(data=df, y="body_type")
def Body_Type_(body_type):
if body_type == "SUV" or body_type == "Sedan" or body_type == "Coupe":
return body_type
else:
return "Other"
df["body_type"] = df["body_type"].apply(Body_Type_)
sns.countplot(df["body_type"])
# ### Handle no_of_cylinders
sns.countplot(data=df, x="no_of_cylinders")
# #### Remove 3, 10, 5, None
not_important_no_of_cylinders_idx = df[
(df["no_of_cylinders"] == "3")
| (df["no_of_cylinders"] == "10")
| (df["no_of_cylinders"] == "5")
| (df["no_of_cylinders"] == "None")
].index
df.drop(not_important_no_of_cylinders_idx, axis=0, inplace=True)
df["no_of_cylinders"] = df["no_of_cylinders"].astype(int)
sns.countplot(data=df, x="no_of_cylinders")
# print(list(df['motors_trim'].unique()))
df.columns
col = [
"body_condition",
"mechanical_condition",
"seller_type",
"body_type",
"no_of_cylinders",
"transmission_type",
"regional_specs",
"horsepower",
"fuel_type",
"steering_side",
"color",
"emirate",
]
df.info()
for x in df.columns:
sns.countplot(data=df, y=df[x])
    plt.figure()  # start a new figure for the next count plot
df["company"].value_counts().head(23).index
df["company"].value_counts().sort_values().head(29)
other_companies = df["company"].value_counts().sort_values().head(29).index
other_companies = list(other_companies)
def Handel_Companies(company):
for c in range(len(other_companies)):
if company == other_companies[c]:
return "other"
return company
df["company"] = df["company"].apply(Handel_Companies)
sns.countplot(data=df, y=df["company"])
df["company"].value_counts()
# df.groupby('company').sum()
df
df.info()
df
df["horsepower"].value_counts()
def Horsepower(horsepower):
if (
horsepower == "700 - 800 HP"
or horsepower == "800 - 900 HP"
or horsepower == "900+ HP"
):
return "More than 700"
else:
return horsepower
df["horsepower"] = df["horsepower"].apply(Horsepower)
df["horsepower"].value_counts()
for col in df.columns:
sns.countplot(data=df, y=df[col])
plt.figure()
sns.displot(data=df, x=df["price_in_aed"])
sns.boxenplot(data=df, x=df["price_in_aed"])
df[df["price_in_aed"] > 1500000].index
x = df.drop(df[df["price_in_aed"] > 1500000].index)
sns.displot(data=x, x=x["price_in_aed"])
sns.displot(np.log(df["price_in_aed"]))
df.describe()
df.corr()
x.corr()
# df.drop('date_posted',inplace=True,axis=1)
# ## Here we can see that removing outliers from the target increases the correlation, so keeping the outliers here makes no sense
df = x
df
df.describe()
df.head(1)
# # Bivariate Analysis
# ## Relation between price_in_aed and the other numeric features
sns.heatmap(df.corr(), annot=True)
sns.scatterplot(x=df["price_in_aed"], y=df["year"])
sns.scatterplot(y=df["price_in_aed"], x=df["kilometers"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["year"])
sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["kilometers"])
sns.scatterplot(y=df["year"], x=df["kilometers"])
df.corr()
df.drop("emirate", inplace=True, axis=1)
df["motors_trim"].value_counts()
# df.drop('motors_trim',inplace=True,axis=1)
for col in df.columns:
sns.countplot(data=df, y=df[col])
plt.figure()
df.describe()
df.head(2)
df.reset_index()
df.info()
df.reset_index(inplace=True, drop=True)
# # Let's Answer Business Questions
# ## 1- Top 10 companies by sales
# ## 2- For the top 10 companies by sales, what are the 5 most common models
# ## 3- Top 10 highest-priced companies regardless of other factors
# ## 4- Top 10 highest-priced companies in the 200-300 horsepower category
# ## 5- Top 10 highest-priced companies over the 2010-2015 period
# ## 6- Best selling body_type for GCC Specs (regional_specs)
# ## 7- Top 10 models by sales
# ## 8- Top 10 highest-priced models and their companies
# ## 9- Top 10 highest-priced models and their companies in the 200-500 horsepower category
# ## 10- Top 10 models with 0 kilometers driven and their data
# ## 11- Most frequent regional_specs among cars with 0 kilometers driven
# ## 12- For Owner sellers, the 2 best selling body_types
# ## 13- For Owner sellers, the top 10 models by sales and their data
# ## 14- For Dealer sellers, the top 10 models by sales and their data
# ## 15- Top 5 highest-priced colors among cars below the average kilometers
# ## 16- Top 10 motors_trim by sales
# ## 17- For each body_type and each seller_type, which has the most sales
# ## 18- For each body_type and each no_of_cylinders, which has the most sales
# ## 19- For each seller_type and each no_of_cylinders, which has the most sales
# ## 20- For each seller_type and each horsepower, which has the most sales
# ## 21- For each body_type and each horsepower, which has the most sales
# ## 22- For each horsepower and each no_of_cylinders, which has the most sales
# ## 23- For each seller_type, total kilometers driven
# ## 24- For each seller_type, average kilometers driven
# ## 25- For each body_type, total kilometers driven
# ## 26- For each body_type, average kilometers driven
# ## 27- For each seller_type and each body_type, total kilometers driven
# ## 28- For each seller_type and each body_type, average kilometers driven
# ## 29- For each body_type and each no_of_cylinders, total kilometers driven
# ## 30- For each body_type and each no_of_cylinders, average kilometers driven
# ## 1- Top 10 companies by sales
df["company"].value_counts().head(10)
most_10_companies_idx = df["company"].value_counts().head(10).index
most_10_companies_df = df[df["company"].isin(most_10_companies_idx)]
most_10_companies_df
sns.countplot(
data=most_10_companies_df,
y=most_10_companies_df["company"],
order=most_10_companies_idx,
)
# ## 2- For the top 10 companies by sales, what are the 5 most common models
#
most_10_companies_df["model"].value_counts().head(5)
most_5_models_in_most_10_companies_have_seles_idx = (
most_10_companies_df["model"].value_counts().head(5).index
)
most_5_models_in_most_10_companies_have_seles_df = most_10_companies_df[
most_10_companies_df["model"].isin(
most_5_models_in_most_10_companies_have_seles_idx
)
]
most_5_models_in_most_10_companies_have_seles_df
sns.countplot(
data=most_5_models_in_most_10_companies_have_seles_df,
x=most_5_models_in_most_10_companies_have_seles_df["model"],
order=most_5_models_in_most_10_companies_have_seles_idx,
)
# ## 3- Top 10 highest-priced companies regardless of other factors
#
df.groupby("company").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]]
# ## 4- Top 10 highest-priced companies in the 200-300 horsepower category
#
df[df["horsepower"] == "200 - 300 HP"].groupby("company").sum().sort_values(
by="price_in_aed"
).head(10)[["price_in_aed"]]
# ## 5- Top 10 highest-priced companies over the 2010-2015 period
#
df[(df["year"] < 2015) & (df["year"] > 2010)].groupby("company").sum().sort_values(
by="price_in_aed"
).head(10)[["price_in_aed"]]
# ## 6- Best selling body_type for GCC Specs (regional_specs)
#
df[df["regional_specs"] == "GCC Specs"]["body_type"].value_counts().to_frame()
# ## 7- Top 10 models by sales
#
df["model"].value_counts().head(10).to_frame()
most_10_models_have_seles_idx = df["model"].value_counts().head(10).index
most_10_models_have_seles_df = df[df["model"].isin(most_10_models_have_seles_idx)]
most_10_models_have_seles_df.head()
sns.countplot(
data=most_10_models_have_seles_df,
y=most_10_models_have_seles_df["model"],
order=most_10_models_have_seles_idx,
)
# ## 8- Top 10 highest-priced models and their companies
#
df.groupby("model").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]]
most_10_high_price_models_idx = (
df.groupby("model")
.sum()
.sort_values(by="price_in_aed")
.head(10)[["price_in_aed"]]
.index
)
most_10_high_price_models_df = df[df["model"].isin(most_10_high_price_models_idx)]
most_10_high_price_models_df[["model", "company", "price_in_aed"]]
# ## 9- Top 10 highest-priced models and their companies in the 200-500 horsepower category
#
df_200_500_HP = df[
(df["horsepower"] == "200 - 300 HP") | (df["horsepower"] == "400 - 500 HP")
]
df_200_500_HP.groupby("model").sum().sort_values(by="price_in_aed").head(10)[
["price_in_aed"]
]
most_10_high_price_models_idx = (
df_200_500_HP.groupby("model")
.sum()
.sort_values(by="price_in_aed")
.head(10)[["price_in_aed"]]
.index
)
most_10_high_price_models_df = df_200_500_HP[
df_200_500_HP["model"].isin(most_10_high_price_models_idx)
]
most_10_high_price_models_df[["model", "company", "price_in_aed"]]
# ## 10- Top 10 models with 0 kilometers driven
#
most_10_models_that_spend_0_km_idx = (
df[df["kilometers"] == 0.0]["model"].value_counts().head(10).index
)
df[df["kilometers"] == 0.0]["model"].value_counts().head(10)
most_10_models_that_spend_0_km_df = df[
df["model"].isin(most_10_models_that_spend_0_km_idx)
]
most_10_models_that_spend_0_km_df
sns.countplot(
data=most_10_models_that_spend_0_km_df,
y=most_10_models_that_spend_0_km_df["model"],
order=most_10_models_that_spend_0_km_idx,
)
# ## 11- Most frequent regional_specs among cars with 0 kilometers driven
#
df[df["kilometers"] == 0.0]["regional_specs"].value_counts().to_frame()
# ## 12- For Owner sellers, the 2 best selling body_types
#
df[df["seller_type"] == "Owner"]["body_type"].value_counts().head(2)
# ## 13- For Owner sellers, the top 10 models by sales and their data
#
df[df["seller_type"] == "Owner"]["model"].value_counts().head(10)
owner_most_10_models_seles_idx = (
df[df["seller_type"] == "Owner"]["model"].value_counts().head(10).index
)
owner_most_10_models_seles_df = df[df["model"].isin(owner_most_10_models_seles_idx)]
owner_most_10_models_seles_df
# ## 14- For Dealer sellers, the top 10 models by sales and their data
#
df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10)
Dealer_most_10_models_seles_idx = (
df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10).index
)
Dealer_most_10_models_seles_df = df[df["model"].isin(Dealer_most_10_models_seles_idx)]
Dealer_most_10_models_seles_df
# ## 15- Top 5 highest-priced colors among cars below the average kilometers
under_avg_km_df = df[df["kilometers"] < df["kilometers"].mean()]
under_avg_km_df.groupby("color").sum().sort_values(
    by="price_in_aed", ascending=False
).head(5)[["price_in_aed"]]
# ## 16- Top 10 motors_trim by sales
df["motors_trim"].value_counts().head(10)
most_10_motors_trim_have_seles_idx = df["motors_trim"].value_counts().head(10).index
most_10_motors_trim_have_seles_df = df[
df["motors_trim"].isin(most_10_motors_trim_have_seles_idx)
]
most_10_motors_trim_have_seles_df
sns.countplot(
data=most_10_motors_trim_have_seles_df,
y=most_10_motors_trim_have_seles_df["motors_trim"],
order=most_10_motors_trim_have_seles_idx,
)
# ## 17- For each body_type and each seller_type, which has the most sales
#
df["dumy"] = 1
df.pivot_table(columns="seller_type", index="body_type", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="seller_type", hue="body_type")
# ## 18- For each body_type and each no_of_cylinders, which has the most sales
df.pivot_table(columns="body_type", index="no_of_cylinders", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="body_type", hue="no_of_cylinders")
sns.countplot(df["no_of_cylinders"])
# ## 19- For each seller_type and each no_of_cylinders, which has the most sales
df.pivot_table(
columns="seller_type", index="no_of_cylinders", values="dumy", aggfunc=sum
)
sns.countplot(data=df, x="seller_type", hue="no_of_cylinders")
# ## 20- For each seller_type and each horsepower, which has the most sales
#
df.pivot_table(columns="seller_type", index="horsepower", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="seller_type", hue="horsepower")
sns.countplot(y=df["horsepower"])
# ## 21- For each body_type and each horsepower, which has the most sales
#
df.pivot_table(columns="body_type", index="horsepower", values="dumy", aggfunc=sum)
sns.countplot(data=df, x="body_type", hue="horsepower")
sns.countplot(df["body_type"])
# ## 22- For each horsepower and each no_of_cylinders, which has the most sales
#
df.pivot_table(
columns="horsepower", index="no_of_cylinders", values="dumy", aggfunc=sum
)
sns.countplot(data=df, y="horsepower", hue="no_of_cylinders")
sns.countplot(y=df["no_of_cylinders"])
# ## 23- For each seller_type, total kilometers driven
df["seller_type"].value_counts().index
sns.barplot(data=df, x="seller_type", y="kilometers", estimator=sum)
df.groupby("seller_type").sum()[["kilometers"]]
# ## 24- For each seller_type, average kilometers driven
sns.barplot(data=df, x="seller_type", y="kilometers")
df.groupby("seller_type").mean()[["kilometers"]]
# conclusion here is that Owner-type sellers sell cars that have been driven more kilometers on average
# ## 25- For each body_type, total kilometers driven
sns.barplot(data=df, x="body_type", y="kilometers", estimator=sum)
df.groupby("body_type").sum()[["kilometers"]]
# ## 26- For each body_type, average kilometers driven
sns.barplot(data=df, x="body_type", y="kilometers")
df.groupby("body_type").mean()[["kilometers"]]
df
# ## 27- For each seller_type and each body_type, total kilometers driven
sns.barplot(
data=df, x="seller_type", y="kilometers", hue=df["body_type"], estimator=sum
)
df.pivot_table(
columns="seller_type", index="body_type", values="kilometers", aggfunc=sum
)
# ## 28- For each seller_type and each body_type, average kilometers driven
sns.barplot(data=df, x="seller_type", y="kilometers", hue=df["body_type"])
df.pivot_table(columns="seller_type", index="body_type", values="kilometers")
# ## 29- For each body_type and each no_of_cylinders, total kilometers driven
#
sns.barplot(
data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"], estimator=sum
)
df.pivot_table(
columns="body_type", index="no_of_cylinders", values="kilometers", aggfunc=sum
)
# ## 30- For each body_type and each no_of_cylinders, average kilometers driven
#
sns.barplot(data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"])
df.pivot_table(columns="body_type", index="no_of_cylinders", values="kilometers")
| false | 1 | 7,083 | 0 | 7,558 | 7,083 |
||
129561547
|
<jupyter_start><jupyter_text>Tetris Openers Dataset
Kaggle dataset identifier: tetris-openers-dataset
<jupyter_script># Data Science Classification Project: Tetris Opener Classification
# ## Entire project is available at https://github.com/quickandsmart/tetris-opener-classifier
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
from IPython.display import HTML
import numpy as np
# Data Load: Load in and Resize Tetris Opener Images
# **Manually collected different tetris openers and resized them to be 256x256. More information about the openers can be found in the README**
BATCH_SIZE = 8
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 15
dataset = tf.keras.preprocessing.image_dataset_from_directory(
"openers",
seed=123,
shuffle=True,
image_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
)
class_names = dataset.class_names
class_names
# ### Visualization of the first batch of images in our dataset
for image_batch, labels_batch in dataset.take(1):
for i in range(8):
ax = plt.subplot(2, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
# Train Validate Test Split the Data
# **The dataset needs to be split into 3 parts to train the neural network: a large set for training the model, a validation set to evaluate the model while it is being trained, and lastly a test set to see how the model does after being fully trained. The current model was trained with a 60/20/20 split, which led to 10 of the 17 batches being used for training, 3 of 17 for validation, and 4 of 17 for testing**
def get_dataset_partitions_tf(
ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000
):
assert (train_split + test_split + val_split) == 1
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
test_ds = ds.skip(train_size).skip(val_size)
return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
len(train_ds), len(val_ds), len(test_ds)
# **Cache and prefetch the datasets to make training the model faster and shuffle the datasets one more time**
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# Building the Classification Model
# ### Creating a Layer for Resizing and Normalization
# **This first layer in the neural network is meant to resize and rescale any image that is put into the model to classify after the training is done**
resize_and_rescale = tf.keras.Sequential(
[
layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
layers.experimental.preprocessing.Rescaling(1.0 / 255),
]
)
# ### Data Augmentation
# **Since I didn't have a lot of data to work with Data Augmentation is used to create more sample data to train with, which can help boost the accuracy of the model. The data augmentation included Flipping the image horizontally, adding contrast to the image by up to 40% and zooming in the image horizontally and vertically by up to 10%**
data_augmentation = tf.keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomContrast(0.4),
layers.experimental.preprocessing.RandomZoom(
height_factor=(0.2, 0.0), width_factor=(0.2, 0.0)
),
]
)
augmented_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y))
train_ds_final = (
train_ds.concatenate(augmented_ds)
.shuffle(buffer_size=10000)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# **With this data augmentation, I have 2x more images to work with for our training set**
len(train_ds_final)
# Model Architecture
# **I used a Convolutional Neural Network (CNN) since it works well for training on images. This involves repeatedly applying convolution and max-pooling layers to shrink the image and build up recognizable patterns**
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 5
model = models.Sequential(
[
resize_and_rescale,
layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(64, activation="relu"),
layers.Dense(n_classes, activation="softmax"),
]
)
model.build(input_shape=input_shape)
model.summary()
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["accuracy"],
)
history = model.fit(
train_ds_final,
batch_size=BATCH_SIZE,
validation_data=val_ds,
verbose=1,
epochs=15,
)
scores = model.evaluate(test_ds)
scores
# **With this model I managed to achieve 83.333% accuracy on the test dataset, which is very good**
# Run Predictions on Test Images
import numpy as np
plt.figure(figsize=(3, 3))
for images_batch, labels_batch in test_ds.take(1):
first_image = images_batch[0].numpy().astype("uint8")
first_label = labels_batch[0].numpy()
plt.imshow(first_image)
print("actual label:", class_names[first_label])
batch_prediction = model.predict(images_batch, verbose=0)
print("predicted label:", class_names[np.argmax(batch_prediction[0])])
plt.axis("off")
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)
predictions = model.predict(img_array, verbose=0)
predicted_class = class_names[np.argmax(predictions[0])]
confidence = round(100 * (np.max(predictions[0])), 2)
return predicted_class, confidence
plt.figure(figsize=(10, 10))
for images, labels in test_ds.take(1):
for i in range(8):
ax = plt.subplot(2, 4, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
predicted_class, confidence = predict(model, images[i].numpy())
actual_class = class_names[labels[i]]
plt.title(
f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%"
)
plt.axis("off")
# Save the Model
# **Save the model; whenever I want to test changes later I can keep multiple versioned models**
import os
model_version = max([int(i) for i in os.listdir("../models") + [0]]) + 1
model.save(f"../models/{model_version}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561547.ipynb
|
tetris-openers-dataset
|
quickandsmart
|
[{"Id": 129561547, "ScriptId": 38525309, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13330521, "CreationDate": "05/14/2023 21:10:23", "VersionNumber": 1.0, "Title": "Tetris Opener Classification", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185745669, "KernelVersionId": 129561547, "SourceDatasetVersionId": 5685419}]
|
[{"Id": 5685419, "DatasetId": 3268605, "DatasourceVersionId": 5760996, "CreatorUserId": 13330521, "LicenseName": "Unknown", "CreationDate": "05/14/2023 21:02:03", "VersionNumber": 1.0, "Title": "Tetris Openers Dataset", "Slug": "tetris-openers-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3268605, "CreatorUserId": 13330521, "OwnerUserId": 13330521.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5685419.0, "CurrentDatasourceVersionId": 5760996.0, "ForumId": 3334223, "Type": 2, "CreationDate": "05/14/2023 21:02:03", "LastActivityDate": "05/14/2023", "TotalViews": 138, "TotalDownloads": 2, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 13330521, "UserName": "quickandsmart", "DisplayName": "Quickandsmart", "RegisterDate": "01/18/2023", "PerformanceTier": 0}]
|
| false | 0 | 2,153 | 0 | 2,178 | 2,153 |
||
129561844
|
# Code for ["Causal inference with Synthetic Control using Python and SparseSC"](https://aayushmnit.github.io/posts/2022-09-19-SyntheticControl/2022-09-19_SyntheticControl.html) blog.
# ## What is Synthetic Control Method?
# I will try to keep this part short and focus more on why Data scientists should care about such methods and how to use them on larger datasets based on practical experience using [SparseSC package](https://github.com/microsoft/SparseSC).
# The Synthetic Control (SC) method is a statistical method used to estimate causal effects from binary treatments on observational panel (longitudinal) data. The method got quite a coverage by being described as [“the most important innovation in the policy evaluation literature in the last few years”](https://www.aeaweb.org/articles?id=10.1257/jep.31.2.3) and got an article published in [Washington Post - Seriously, here’s one amazing math trick to learn what can’t be known](https://www.washingtonpost.com/news/wonk/wp/2015/10/30/how-to-measure-things-in-a-world-of-competing-claims/). “SC is a technique to create an artificial control group by taking a weighted average of untreated units in such a way that it reproduces the characteristics of the treated units before the intervention(treatment). The SC acts as the counterfactual for a treatment unit, and the estimate of a treatment effect is the difference between the observed outcome in the post-treatment period and the SC's outcome.”
# “One way to think of SC is as an improvement upon [difference-in-difference (DiD) estimation](https://en.wikipedia.org/wiki/Difference_in_differences). Typical DiD will compare a treated unit to the average of the control units. But often the treated unit does not look like a typical control (e.g., it might have a different growth rate), in which case the 'parallel trend' assumption of DiD is not valid. SC remedies this by choosing a smarter linear combination, rather than the simple average, to weigh more heavily the more similar units. SC's assumption is if there are endogenous factors that affect treatment and future outcomes then you should be able to control them by matching past outcomes. The matching that SC provides can therefore deal with some problems in estimation that DiD cannot handle.”
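# **A minimal, self-contained sketch (purely illustrative, with made-up numbers) of the point above: DiD compares the treated unit with the *simple* average of the controls, while SC uses a *weighted* average. The weights `w` below are assumed for illustration, whereas in practice SparseSC learns them from the pre-treatment fit.**
import numpy as np
controls_pre = np.array([10.0, 20.0, 15.0])  # control outcomes before treatment
controls_post = np.array([12.0, 23.0, 18.0])  # control outcomes after treatment
treated_pre, treated_post = 16.0, 22.0  # treated unit's observed outcomes
# Difference-in-differences: change in treated minus change in the plain control average
did_effect = (treated_post - treated_pre) - (controls_post.mean() - controls_pre.mean())
# Synthetic control: weighted donor average as the counterfactual (hypothetical weights)
w = np.array([0.2, 0.5, 0.3])
sc_effect = treated_post - w @ controls_post
print(f"DiD estimate: {did_effect:.2f}, SC estimate: {sc_effect:.2f}")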
# Here is the link to the Causal inference book which I found most useful to understand the math behind SC- [Causal Inference for The Brave and True by Matheus Facure - Chapter 15](https://matheusfacure.github.io/python-causality-handbook/15-Synthetic-Control.html).
# ## Why should any Data scientist care about this method?
# Often as a Data Scientist, you will encounter situations as follows where running A/B testing is not feasible because of -
# 1. Lack of infrastructure
# 2. Lack of similar groups for running A/B testing (e.g., when evaluating state policies, since no other state is an exact equivalent)
# 3. Providing unwanted advantage to one group over others. Sometimes running an A/B test can give an unfair advantage and lead you into anti-trust territory. For example, what if Amazon tries to charge differential pricing for different customers or apply different margins for their sellers for the same product?
# As a data scientist, stakeholders may still ask you to estimate the impact of certain changes/treatments, and Synthetic controls can come to the rescue in this situation. For this reason, it is a valuable tool to keep in your algorithmic toolkit.
# ## Problem Overview
# The Proposition 99 study is the classic example used to explain the use case for this approach and the key features of the SparseSC library. “In 1988, California passed a famous Tobacco Tax and Health Protection Act, which became known as Proposition 99. Its primary effect is to impose a 25-cent per pack state excise tax on the sale of tobacco cigarettes within California, with approximately equivalent excise taxes similarly imposed on the retail sale of other commercial tobacco products, such as cigars and chewing tobacco. Additional restrictions placed on the sale of tobacco include a ban on cigarette vending machines in public areas accessible by juveniles, and a ban on the individual sale of single cigarettes. Revenue generated by the act was earmarked for various environmental and health care programs, and anti-tobacco advertisements. To evaluate its effect, we can gather data on cigarette sales from multiple states and across a number of years; in that study the data covered the years 1970 to 2000 for 39 states.” In this notebook the same technique is applied to Indian regions, treating the creation of Special Economic Zones (SEZs) as the intervention and using regional GRP, industry production, investments, and fixed assets as the outcome series.
import os
install = '"git+https://github.com/microsoft/SparseSC.git"'
os.system(f"pip install -Uqq {install}")
import pandas as pd
import numpy as np
import SparseSC
from datetime import datetime
import warnings
import plotly.express as px
import plotly.graph_objects as pgo
pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")
# Let's look at the data
# Here each treatment unit is an Indian region (`region` column), and for every region we have yearly series of gross regional product (`grp`), industry production, investments, and fixed assets. Each table is wide: one row per region and one column per year, which is the panel layout the synthetic control fit expects.
df_grp = pd.read_csv("/kaggle/working/in_grp_current.csv")
df_industry = pd.read_csv("/kaggle/working/in_industry_production.csv")
df_investments = pd.read_csv("/kaggle/working/in_investments.csv")
df_fixed_assets = pd.read_csv("/kaggle/working/in_fixed_assets.csv")
def prepare_df(df):
df.index = df["region"].str.rstrip().str.lower()
df.drop(columns="region", inplace=True)
df.columns = df.columns.str.slice(stop=4).astype(int)
for colname in df.columns:
df[colname] = pd.to_numeric(df[colname], errors="coerce")
df[colname] = df[colname].fillna(df[colname].dropna().mean())
return df
df_grp = prepare_df(df_grp)
df_industry = prepare_df(df_industry)
df_investments = prepare_df(df_investments)
df_fixed_assets = prepare_df(df_fixed_assets)
df_fixed_assets.index
wiki_url = "https://en.wikipedia.org/wiki/List_of_special_economic_zones_in_India"
zones_df = pd.read_html(wiki_url)[2]
zones_df["Notification Date"] = pd.to_datetime(zones_df["Notification Date"])
zones_df.groupby(by="DC Name")["Notification Date"].min()
sezs_region_and_year = [
("andhra pradesh", 2007), # Andhra Pradesh Special Economic Zone
("karnataka", 2007), # Mangalore Special Economic Zone
("tamil nadu", 2003), # Salem Special Economic Zone & MEPZ Special Economic Zone
("madhya pradesh", 2003), # Indore Special Economic Zone
("west bengal", 2003), # Falta Special Economic Zone
("uttar pradesh", 2003), # Noida Special Economic Zone
]
LIST_OF_REGIONS_WITH_SEZS = [x for x, y in sezs_region_and_year]
for rname in LIST_OF_REGIONS_WITH_SEZS:
for df in [df_grp, df_industry, df_investments, df_fixed_assets]:
if rname not in df.index.tolist():
print(f"{rname} is not present")
print("finished")
df_grp.head(3)
df_industry.head(3)
df_investments.head(3)
df_fixed_assets.head(3)
DATAFRAMES = {
"grp": df_grp,
"industry": df_industry,
"investments": df_investments,
"fixed_assets": df_fixed_assets,
}
from tqdm.notebook import tqdm
def calculate_synths(current_region_name, current_year):
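    """
    Fit one SparseSC model per outcome table in DATAFRAMES for the treated region:
    columns up to `current_year` are the pre-treatment features, later columns are
    the targets, and the other SEZ regions are excluded from the donor pool.
    Returns {variable: {"synth": fitted model, "res_df": Observed vs Synthetic}}.
    """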
    idx_to_drop = [
        val
        for val in list(DATAFRAMES.values())[0].index.values
        if val in LIST_OF_REGIONS_WITH_SEZS and val != current_region_name
    ]
synths = {}
for df_name, df in tqdm(DATAFRAMES.items()):
synth = SparseSC.fit(
features=df.drop(idx_to_drop, axis=0)
.iloc[:, df.columns <= current_year]
.values,
targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values,
treated_units=[
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
],
progress=0,
print_path=False,
)
treated_units = [
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
]
result = df.loc[df.index == current_region_name].T.reset_index(drop=False)
result.columns = ["year", "Observed"]
result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[
treated_units, :
][0]
synths[df_name] = {"synth": synth, "res_df": result}
return synths
synths_for_each_region = {}
for current_region, current_year in sezs_region_and_year:
print(f"Calculating for {current_region}")
synths_for_each_region[current_region] = calculate_synths(
current_region, current_year
)
def vizualize(result: pd.DataFrame, region: str, variable: str, year: int):
fig = px.line(
data_frame=result,
x="year",
y=["Observed", "Synthetic"],
template="plotly_dark",
)
fig.add_trace(
pgo.Scatter(
x=[year, year],
y=[
0,
result.Observed.max() * 1.02
if result.Observed.max() > result.Synthetic.max()
else result.Synthetic.max() * 1.02,
],
# y=[result.Observed.min()*0.98,result.Observed.max()*1.02],
line={
"dash": "dash",
},
name="SEZ creation",
)
)
fig.update_layout(
title={
"text": f"Synthetic Control Assessment for {region}",
"y": 0.95,
"x": 0.5,
},
legend=dict(y=1, x=0.1, orientation="v"),
legend_title="",
xaxis_title="Year",
yaxis_title=variable,
font=dict(size=15),
)
fig.show(renderer="notebook")
print(sezs_region_and_year)
vizualize(
synths_for_each_region["tamil nadu"]["fixed_assets"]["res_df"],
"Noida, Uttar Pradesh state",
"Fixed assets",
2003,
)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
# from sklearn.metrics import mean_absolute_percentage_error as MAPE
from sklearn.metrics import mean_absolute_error as MAE
print(sezs_region_and_year)
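# Diagnostic for fit quality: mean absolute error (MAE) between the observed and synthetic
# series, split into pre- and post-treatment years; a Post/Pre ratio well above 1 means the
# series diverge after the SEZ was created by more than the pre-treatment fit error would suggest.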
df = synths_for_each_region["uttar pradesh"]["fixed_assets"]["res_df"]
treatment_year = 2003
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
pd.DataFrame(
{"Pre": [mae_pre], "Post": [mae_post], "Post/Pre": [mae_post / mae_pre]},
index=["MAE"],
)
def calculate_synths_for_placebo(current_region_name, current_year):
"""
basically the same as calculate_synth(),
but we drop all the regions, and fit() replaced with fit_fast()
so we will spend less time and
the estimations does not have to be all that accurate
"""
    idx_to_drop = [
        val
        for val in list(DATAFRAMES.values())[0].index.values
        if val in LIST_OF_REGIONS_WITH_SEZS
    ]
synths = {}
for df_name, df in tqdm(DATAFRAMES.items()):
try:
synth = SparseSC.fit_fast(
features=df.drop(idx_to_drop, axis=0)
.iloc[:, df.columns <= current_year]
.values,
targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values,
treated_units=[
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
],
progress=0,
print_path=False,
)
            treated_units = [
                idx
                for idx, val in enumerate(df.drop(idx_to_drop).index.values)
                if val == current_region_name
            ]
result = df.loc[df.index == current_region_name].T.reset_index(drop=False)
result.columns = ["year", "Observed"]
result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[
treated_units, :
][0]
synths[df_name] = {"synth": synth, "res_df": result}
        # LinAlgError ("Matrix is singular") is raised for fixed_assets, so that variable is simply skipped
except Exception as e:
print(f"{e} occurred for {df_name}")
return synths
synths_for_placebo = {}
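# Placebo study: pick 4 random regions that never received an SEZ, pretend they were "treated"
# in 2011, and fit synthetic controls for them too; their Post/Pre MAE ratios later serve as a
# reference distribution for judging the treated regions' ratios.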
for i in range(4):
current_region = df_grp.index.values[np.random.randint(0, len(df_grp))]
current_year = 2011
while current_region in LIST_OF_REGIONS_WITH_SEZS:
current_region = df_grp.index.values[np.random.randint(0, len(df_grp))]
print(f"Calculating for {current_region}")
synths_for_placebo[current_region] = calculate_synths_for_placebo(
current_region, current_year
)
_ = list(synths_for_placebo.keys())[0]
for variable in synths_for_placebo[_].keys():
for rname in synths_for_each_region.keys():
df = synths_for_each_region[rname][variable]["res_df"]
treatment_year = [y for r, y in sezs_region_and_year if r == rname][0]
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
rel = mae_post / mae_pre
synths_for_each_region[rname][variable]["mae_pre"] = mae_pre
synths_for_each_region[rname][variable]["mae_post"] = mae_post
synths_for_each_region[rname][variable]["rel"] = rel
for rname in synths_for_placebo.keys():
df = synths_for_placebo[rname][variable]["res_df"]
treatment_year = 2011
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
rel = mae_post / mae_pre
synths_for_placebo[rname][variable]["mae_pre"] = mae_pre
synths_for_placebo[rname][variable]["mae_post"] = mae_post
synths_for_placebo[rname][variable]["rel"] = rel
import matplotlib.pyplot as plt
_ = list(synths_for_placebo.keys())[0]
post_pre_relations = {}
for variable in synths_for_placebo[_].keys():
    x = []
    height = []
    for rname in synths_for_each_region.keys():
        x.append(rname)
        height.append(synths_for_each_region[rname][variable]["rel"])
    for rname in synths_for_placebo.keys():
        height.append(synths_for_placebo[rname][variable]["rel"])
        x.append(rname)
    post_pre_relations[variable] = {"x": x, "height": height}
colors = ["cyan" for _ in range(len(post_pre_relations["grp"]["x"]) - 4)]
colors.extend(["green"] * 4)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["grp"]["x"],
height=post_pre_relations["grp"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for GRP", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["investments"]["x"],
height=post_pre_relations["investments"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for Direct investments", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["fixed_assets"]["x"],
height=post_pre_relations["fixed_assets"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for Fixed assets", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
result = {"treated": synths_for_each_region, "placebo": synths_for_placebo}
with open("bullshit_for_india.txt", "w") as f:
for rname, year in sezs_region_and_year:
print(rname, year)
f.write(rname)
f.write("\n")
for variable in tqdm(synths_for_each_region[rname].keys()):
f.write(variable)
f.write("\n")
if variable == "grp":
colnames = np.arange(1990, 2022)
else:
colnames = np.arange(1990, 2020)
df = pd.DataFrame(
np.hstack(
(
synths_for_each_region[rname][variable]["synth"].features,
synths_for_each_region[rname][variable]["synth"].targets,
)
),
columns=colnames,
)
year = year
## Creating unit treatment_periods
unit_treatment_periods = np.full((df.values.shape[0]), np.nan)
unit_treatment_periods[
synths_for_each_region[rname][variable]["synth"].treated_units
] = [idx for idx, colname in enumerate(df.columns) if colname > year][0]
try:
## fitting estimate effects method
sc = SparseSC.estimate_effects(
outcomes=df.values,
unit_treatment_periods=unit_treatment_periods,
max_n_pl=50, # Number of placebos
level=0.9, # Level for confidence intervals
)
f.write(str(sc))
f.write("\n\n")
f.write(
f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \
with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}"
)
f.write("\n\n")
except Exception as e:
print(f"{e} occured for {rname}, {variable}")
print(
f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \
with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}"
)
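# The fitted SparseSC objects are not JSON-serializable, so reduce each one to its weights and
# penalty parameters (and each result DataFrame to a dict) before dumping everything to JSON below.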
for synth_group in result.keys():
for rname in result[synth_group].keys():
for variable in result[synth_group][rname].keys():
result[synth_group][rname][variable]["synth"] = {
"weights": result[synth_group][rname][variable]["synth"].get_weights(
True
),
"v_matrix": result[synth_group][rname][variable]["synth"].V,
"v_pen": result[synth_group][rname][variable]["synth"].fitted_v_pen,
"w_pen": result[synth_group][rname][variable]["synth"].fitted_w_pen,
}
result[synth_group][rname][variable]["res_df"] = result[synth_group][rname][
variable
]["res_df"].to_dict()
import json
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
with open("result_in.json", "w") as outfile:
json.dump(result, outfile, indent=4, cls=NumpyEncoder)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561844.ipynb
| null | null |
[{"Id": 129561844, "ScriptId": 38457222, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10269757, "CreationDate": "05/14/2023 21:15:06", "VersionNumber": 1.0, "Title": "Synthetic Control using SparseSC for India SEZ", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 390.0, "LinesInsertedFromPrevious": 244.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": 244.0, "LinesDeletedFromFork": 89.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 146.0, "TotalVotes": 0}]
| null | null | null | null |
# Code for ["Causal inference with Synthetic Control using Python and SparseSC"](https://aayushmnit.github.io/posts/2022-09-19-SyntheticControl/2022-09-19_SyntheticControl.html) blog.
# ## What is Synthetic Control Method?
# I will try to keep this part short and focus more on why Data scientists should care about such methods and how to use them on larger datasets based on practical experience using [SparseSC package](https://github.com/microsoft/SparseSC).
# The Synthetic Control (SC) method is a statistical method used to estimate causal effects from binary treatments on observational panel (longitudinal) data. The method got quite a coverage by being described as [“the most important innovation in the policy evaluation literature in the last few years”](https://www.aeaweb.org/articles?id=10.1257/jep.31.2.3) and got an article published in [Washington Post - Seriously, here’s one amazing math trick to learn what can’t be known](https://www.washingtonpost.com/news/wonk/wp/2015/10/30/how-to-measure-things-in-a-world-of-competing-claims/). “SC is a technique to create an artificial control group by taking a weighted average of untreated units in such a way that it reproduces the characteristics of the treated units before the intervention(treatment). The SC acts as the counterfactual for a treatment unit, and the estimate of a treatment effect is the difference between the observed outcome in the post-treatment period and the SC's outcome.”
# “One way to think of SC is as an improvement upon [difference-in-difference (DiD) estimation](https://en.wikipedia.org/wiki/Difference_in_differences). Typical DiD will compare a treated unit to the average of the control units. But often the treated unit does not look like a typical control (e.g., it might have a different growth rate), in which case the 'parallel trend' assumption of DiD is not valid. SC remedies this by choosing a smarter linear combination, rather than the simple average, to weigh more heavily the more similar units. SC's assumption is if there are endogenous factors that affect treatment and future outcomes then you should be able to control them by matching past outcomes. The matching that SC provides can therefore deal with some problems in estimation that DiD cannot handle.”
# Here is the link to the Causal inference book which I found most useful to understand the math behind SC- [Causal Inference for The Brave and True by Matheus Facure - Chapter 15](https://matheusfacure.github.io/python-causality-handbook/15-Synthetic-Control.html).
# ## Why should any Data scientist care about this method?
# Often as a Data Scientist, you will encounter situations as follows where running A/B testing is not feasible because of -
# 1. Lack of infrastructure
# 2. Lack of similar groups for running A/B testing (in case of evaluation of state policies, as there is no state equivalent of other)
# 3. Providing unwanted advantage to one group over others. Sometimes running an A/B test can give an unfair advantage and lead you into anti-trust territory. For example, what if Amazon tries to charge differential pricing for different customers or apply different margins for their sellers for the same product?
# As a data scientist, stakeholders may still ask you to estimate the impact of certain changes/treatments, and Synthetic controls can come to the rescue in this situation. For this reason, it is a valuable tool to keep in your algorithmic toolkit.
# ## Problem Overview
# We will use the Proposition 99 data to explain the use case for this approach and also how to use the SparceSC library and its key features. “In 1988, California passed a famous Tobacco Tax and Health Protection Act, which became known as Proposition 99. Its primary effect is to impose a 25-cent per pack state excise tax on the sale of tobacco cigarettes within California, with approximately equivalent excise taxes similarly imposed on the retail sale of other commercial tobacco products, such as cigars and chewing tobacco. Additional restrictions placed on the sale of tobacco include a ban on cigarette vending machines in public areas accessible by juveniles, and a ban on the individual sale of single cigarettes. Revenue generated by the act was earmarked for various environmental and health care programs, and anti-tobacco advertisements. To evaluate its effect, we can gather data on cigarette sales from multiple states and across a number of years. In our case, we got data from the year 1970 to 2000 from 39 states.”
import os
install = '"git+https://github.com/microsoft/SparseSC.git"'
os.system(f"pip install -Uqq {install}")
import pandas as pd
import numpy as np
import SparseSC
from datetime import datetime
import warnings
import plotly.express as px
import plotly.graph_objects as pgo
pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")
# Let's look at the data
# We have data per `state` as treatment unit and yearly (`year` column) per-capita sales of cigarettes in packs (`cigsale` column) and the cigarette retail price (`retprice` column). We are going to pivot this data so that each row is one treatment unit(`state`), and columns represent the yearly `cigsale` value.
df_grp = pd.read_csv("/kaggle/working/in_grp_current.csv")
df_industry = pd.read_csv("/kaggle/working/in_industry_production.csv")
df_investments = pd.read_csv("/kaggle/working/in_investments.csv")
df_fixed_assets = pd.read_csv("/kaggle/working/in_fixed_assets.csv")
def prepare_df(df):
df.index = df["region"].str.rstrip().str.lower()
df.drop(columns="region", inplace=True)
df.columns = df.columns.str.slice(stop=4).astype(int)
for colname in df.columns:
df[colname] = pd.to_numeric(df[colname], errors="coerce")
df[colname] = df[colname].fillna(df[colname].dropna().mean())
return df
df_grp = prepare_df(df_grp)
df_industry = prepare_df(df_industry)
df_investments = prepare_df(df_investments)
df_fixed_assets = prepare_df(df_fixed_assets)
df_fixed_assets.index
wiki_url = "https://en.wikipedia.org/wiki/List_of_special_economic_zones_in_India"
zones_df = pd.read_html(wiki_url)[2]
zones_df["Notification Date"] = pd.to_datetime(zones_df["Notification Date"])
zones_df.groupby(by="DC Name")["Notification Date"].min()
sezs_region_and_year = [
("andhra pradesh", 2007), # Andhra Pradesh Special Economic Zone
("karnataka", 2007), # Mangalore Special Economic Zone
("tamil nadu", 2003), # Salem Special Economic Zone & MEPZ Special Economic Zone
("madhya pradesh", 2003), # Indore Special Economic Zone
("west bengal", 2003), # Falta Special Economic Zone
("uttar pradesh", 2003), # Noida Special Economic Zone
]
LIST_OF_REGIONS_WITH_SEZS = [x for x, y in sezs_region_and_year]
for rname in LIST_OF_REGIONS_WITH_SEZS:
for df in [df_grp, df_industry, df_investments, df_fixed_assets]:
if rname not in df.index.tolist():
print(f"{rname} is not present")
print("finished")
df_grp.head(3)
df_industry.head(3)
df_investments.head(3)
df_fixed_assets.head(3)
DATAFRAMES = {
"grp": df_grp,
"industry": df_industry,
"investments": df_investments,
"fixed_assets": df_fixed_assets,
}
from tqdm.notebook import tqdm
def calculate_synths(current_region_name, current_year):
for rname in LIST_OF_REGIONS_WITH_SEZS:
if rname != current_region_name:
idx_to_drop = [
val for val in list(DATAFRAMES.values())[0].index.values if val == rname
]
synths = {}
for df_name, df in tqdm(DATAFRAMES.items()):
synth = SparseSC.fit(
features=df.drop(idx_to_drop, axis=0)
.iloc[:, df.columns <= current_year]
.values,
targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values,
treated_units=[
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
],
progress=0,
print_path=False,
)
treated_units = [
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
]
result = df.loc[df.index == current_region_name].T.reset_index(drop=False)
result.columns = ["year", "Observed"]
result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[
treated_units, :
][0]
synths[df_name] = {"synth": synth, "res_df": result}
return synths
synths_for_each_region = {}
for current_region, current_year in sezs_region_and_year:
print(f"Calculating for {current_region}")
synths_for_each_region[current_region] = calculate_synths(
current_region, current_year
)
def vizualize(result: pd.DataFrame, region: str, variable: str, year: int):
fig = px.line(
data_frame=result,
x="year",
y=["Observed", "Synthetic"],
template="plotly_dark",
)
fig.add_trace(
pgo.Scatter(
x=[year, year],
y=[
0,
result.Observed.max() * 1.02
if result.Observed.max() > result.Synthetic.max()
else result.Synthetic.max() * 1.02,
],
# y=[result.Observed.min()*0.98,result.Observed.max()*1.02],
line={
"dash": "dash",
},
name="SEZ creation",
)
)
fig.update_layout(
title={
"text": f"Synthetic Control Assessment for {region}",
"y": 0.95,
"x": 0.5,
},
legend=dict(y=1, x=0.1, orientation="v"),
legend_title="",
xaxis_title="Year",
yaxis_title=variable,
font=dict(size=15),
)
fig.show(renderer="notebook")
print(sezs_region_and_year)
vizualize(
synths_for_each_region["tamil nadu"]["fixed_assets"]["res_df"],
"Noida, Uttar Pradesh state",
"Fixed assets",
2003,
)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
# from sklearn.metrics import mean_absolute_percentage_error as MAPE
from sklearn.metrics import mean_absolute_error as MAE
print(sezs_region_and_year)
df = synths_for_each_region["uttar pradesh"]["fixed_assets"]["res_df"]
treatment_year = 2003
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
pd.DataFrame(
{"Pre": [mae_pre], "Post": [mae_post], "Post/Pre": [mae_post / mae_pre]},
index=["MAE"],
)
def calculate_synths_for_placebo(current_region_name, current_year):
"""
basically the same as calculate_synth(),
but we drop all the regions, and fit() replaced with fit_fast()
so we will spend less time and
the estimations does not have to be all that accurate
"""
for rname in LIST_OF_REGIONS_WITH_SEZS:
idx_to_drop = [
val for val in list(DATAFRAMES.values())[0].index.values if val == rname
]
synths = {}
for df_name, df in tqdm(DATAFRAMES.items()):
try:
synth = SparseSC.fit_fast(
features=df.drop(idx_to_drop, axis=0)
.iloc[:, df.columns <= current_year]
.values,
targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values,
treated_units=[
idx
for idx, val in enumerate(df.drop(idx_to_drop).index.values)
if val == current_region_name
],
progress=0,
print_path=False,
)
treated_units = [
idx
for idx, val in enumerate(df.index.values)
if val == current_region_name
]
result = df.loc[df.index == current_region_name].T.reset_index(drop=False)
result.columns = ["year", "Observed"]
result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[
treated_units, :
][0]
synths[df_name] = {"synth": synth, "res_df": result}
# LinAlgError: Matrix is singular raises for fixed_assets so i just dont calculate for it
except Exception as e:
print(f"{e} occurred for {df_name}")
return synths
synths_for_placebo = {}
for i in range(4):
current_region = df_grp.index.values[np.random.randint(0, len(df_grp))]
current_year = 2011
while current_region in LIST_OF_REGIONS_WITH_SEZS:
current_region = df_grp.index.values[np.random.randint(0, len(df_grp))]
print(f"Calculating for {current_region}")
synths_for_placebo[current_region] = calculate_synths_for_placebo(
current_region, current_year
)
_ = list(synths_for_placebo.keys())[0]
for variable in synths_for_placebo[_].keys():
for rname in synths_for_each_region.keys():
df = synths_for_each_region[rname][variable]["res_df"]
treatment_year = [y for r, y in sezs_region_and_year if r == rname][0]
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
rel = mae_post / mae_pre
synths_for_each_region[rname][variable]["mae_pre"] = mae_pre
synths_for_each_region[rname][variable]["mae_post"] = mae_post
synths_for_each_region[rname][variable]["rel"] = rel
for rname in synths_for_placebo.keys():
df = synths_for_placebo[rname][variable]["res_df"]
treatment_year = 2011
mae_pre = MAE(
df[df["year"] <= treatment_year]["Observed"].values,
df[df["year"] <= treatment_year]["Synthetic"].values,
)
mae_post = MAE(
df[df["year"] > treatment_year]["Observed"].values,
df[df["year"] > treatment_year]["Synthetic"].values,
)
rel = mae_post / mae_pre
synths_for_placebo[rname][variable]["mae_pre"] = mae_pre
synths_for_placebo[rname][variable]["mae_post"] = mae_post
synths_for_placebo[rname][variable]["rel"] = rel
import matplotlib.pyplot as plt
_ = list(synths_for_placebo.keys())[0]
post_pre_relations = {}
for variable in synths_for_placebo[_].keys():
x = []
    height = []
    for rname in synths_for_each_region.keys():
        x.append(rname)
        height.append(synths_for_each_region[rname][variable]["rel"])
    for rname in synths_for_placebo.keys():
        height.append(synths_for_placebo[rname][variable]["rel"])
        x.append(rname)
    post_pre_relations[variable] = {"x": x, "height": height}
colors = ["cyan" for _ in range(len(post_pre_relations["grp"]["x"]) - 4)]
colors.extend(["green"] * 4)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["grp"]["x"],
height=post_pre_relations["grp"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for GRP", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["investments"]["x"],
height=post_pre_relations["investments"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for Direct investments", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
with plt.style.context("dark_background"):
plt.figure(figsize=(20, 8))
plt.bar(
x=post_pre_relations["fixed_assets"]["x"],
height=post_pre_relations["fixed_assets"]["height"],
color=colors,
)
plt.xticks(rotation=30, ha="right", fontsize=19)
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Treated units",
markerfacecolor="cyan",
markersize=15,
),
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label="Placebo units",
markerfacecolor="green",
markersize=15,
),
]
plt.title("MAE Post/Pre relation for Fixed assets", fontsize=23)
plt.legend(handles=legend_elements, loc="upper right", fontsize=19)
result = {"treated": synths_for_each_region, "placebo": synths_for_placebo}
with open("bullshit_for_india.txt", "w") as f:
for rname, year in sezs_region_and_year:
print(rname, year)
f.write(rname)
f.write("\n")
for variable in tqdm(synths_for_each_region[rname].keys()):
f.write(variable)
f.write("\n")
if variable == "grp":
colnames = np.arange(1990, 2022)
else:
colnames = np.arange(1990, 2020)
df = pd.DataFrame(
np.hstack(
(
synths_for_each_region[rname][variable]["synth"].features,
synths_for_each_region[rname][variable]["synth"].targets,
)
),
columns=colnames,
)
year = year
## Creating unit treatment_periods
unit_treatment_periods = np.full((df.values.shape[0]), np.nan)
unit_treatment_periods[
synths_for_each_region[rname][variable]["synth"].treated_units
] = [idx for idx, colname in enumerate(df.columns) if colname > year][0]
try:
## fitting estimate effects method
sc = SparseSC.estimate_effects(
outcomes=df.values,
unit_treatment_periods=unit_treatment_periods,
max_n_pl=50, # Number of placebos
level=0.9, # Level for confidence intervals
)
f.write(str(sc))
f.write("\n\n")
f.write(
f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \
with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}"
)
f.write("\n\n")
except Exception as e:
print(f"{e} occured for {rname}, {variable}")
print(
f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \
with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}"
)
for synth_group in result.keys():
for rname in result[synth_group].keys():
for variable in result[synth_group][rname].keys():
result[synth_group][rname][variable]["synth"] = {
"weights": result[synth_group][rname][variable]["synth"].get_weights(
True
),
"v_matrix": result[synth_group][rname][variable]["synth"].V,
"v_pen": result[synth_group][rname][variable]["synth"].fitted_v_pen,
"w_pen": result[synth_group][rname][variable]["synth"].fitted_w_pen,
}
result[synth_group][rname][variable]["res_df"] = result[synth_group][rname][
variable
]["res_df"].to_dict()
import json
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
with open("result_in.json", "w") as outfile:
json.dump(result, outfile, indent=4, cls=NumpyEncoder)
| false | 0 | 5,960 | 0 | 5,960 | 5,960 |
||
129561202
|
<jupyter_start><jupyter_text>stenosD
Kaggle dataset identifier: stenosd
<jupyter_script>import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
def read_xml_annotation(xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
boxes = []
for obj in root.iter("object"):
xmin = int(obj.find("bndbox/xmin").text)
xmax = int(obj.find("bndbox/xmax").text)
ymin = int(obj.find("bndbox/ymin").text)
ymax = int(obj.find("bndbox/ymax").text)
boxes.append([xmin, xmax, ymin, ymax])
return np.array(boxes)
def load_image_and_annotation(image_path, annotation_dir):
img = cv2.imread(image_path)
annotation_file = os.path.join(
annotation_dir, os.path.splitext(os.path.basename(image_path))[0] + ".xml"
)
boxes = read_xml_annotation(annotation_file)
return img, boxes
def preprocess_image(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image / 255.0
return image
image_dir = "/kaggle/input/stenosd/dataset"
annotation_dir = "/kaggle/input/stenosd/dataset"
batch_size = 32
num_epochs = 10
learning_rate = 0.001
def generate_training_data(image_dir, annotation_dir):
image_paths = glob.glob(os.path.join(image_dir, "*.bmp"))
for i in range(0, len(image_paths), batch_size):
images = []
boxes_list = []
for image_path in image_paths[i : i + batch_size]:
img, boxes = load_image_and_annotation(image_path, annotation_dir)
images.append(preprocess_image(img))
boxes_list.append(boxes)
yield np.array(images), np.array(boxes_list)
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
def create_model(num_classes):
detection_model = tf.saved_model.load("path_to_pretrained_model")
return detection_model
def detect_objects(image, model):
input_tensor = tf.convert_to_tensor(image)
input_tensor = input_tensor[tf.newaxis, ...]
detections = model(input_tensor)
num_detections = int(detections.pop("num_detections"))
detections = {
key: value[0, :num_detections].numpy() for key, value in detections.items()
}
detections["num_detections"] = num_detections
return detections
def visualize_detections(image, detections, label_map_path):
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path, use_display_name=True
)
vis_util.visualize_boxes_and_labels_on_image_array
def visualize_detections(image, detections, label_map_path):
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path, use_display_name=True
)
vis_util.visualize_boxes_and_labels_on_image_array(
image,
detections["detection_boxes"],
detections["detection_classes"].astype(np.int64),
detections["detection_scores"],
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.5,
)
plt.figure(figsize=(12, 8))
plt.imshow(image)
plt.axis("off")
plt.show()
import tensorflow as tf
def create_object_detection_model():
inputs = tf.keras.Input(shape=(None, None, 3)) # Variable input size
backbone = tf.keras.applications.ResNet50(
include_top=False, weights="imagenet"
) # Pretrained backbone
backbone.trainable = True
# Feature extraction
backbone_outputs = backbone(inputs, training=True)
# Additional convolutional layers for object detection
x = layers.Conv2D(256, (3, 3), activation="relu")(backbone_outputs)
x = layers.Conv2D(256, (3, 3), activation="relu")(x)
x = layers.Conv2D(256, (3, 3), activation="relu")(x)
# Localization head
localization = layers.Conv2D(4, (3, 3), activation="linear", name="localization")(x)
# Classification head
classification = layers.Conv2D(
1, (3, 3), activation="sigmoid", name="classification"
)(x)
# Define the model
model = tf.keras.Model(inputs=inputs, outputs=[localization, classification])
return model
from tensorflow.keras import layers
model = create_object_detection_model()
# Define the loss functions
localization_loss = tf.keras.losses.MeanSquaredError()
classification_loss = tf.keras.losses.BinaryCrossentropy()
# Define the metrics
metrics = [tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.MeanIoU(num_classes=2)]
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Compile the model
model.compile(
optimizer=optimizer, loss=[localization_loss, classification_loss], metrics=metrics
)
import os
import glob
import random
from sklearn.model_selection import train_test_split
# Set the directory where your dataset is located
dataset_dir = "/kaggle/input/stenosd/dataset"
# Get the paths of all image files in the dataset directory
image_paths = glob.glob(os.path.join(dataset_dir, "*.bmp"))
# Split the dataset into training and validation sets
train_image_paths, val_image_paths = train_test_split(
image_paths, test_size=0.2, random_state=42
)
# Define a function to load an image and its corresponding annotation
def load_image_and_annotation(image_path):
    annotation_path = image_path.replace(".bmp", ".xml")
    # Load the image and parse its matching XML annotation using read_xml_annotation defined above
    img = cv2.imread(image_path)
    boxes = read_xml_annotation(annotation_path)
    return img, boxes
# Load training images and annotations
train_images = []
train_labels = []
for image_path in train_image_paths:
img, boxes = load_image_and_annotation(image_path)
train_images.append(preprocess_image(img))
train_labels.append(boxes)
# Load validation images and annotations
val_images = []
val_labels = []
for image_path in val_image_paths:
img, boxes = load_image_and_annotation(image_path)
val_images.append(preprocess_image(img))
val_labels.append(boxes)
# Convert the lists to NumPy arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
val_images = np.array(val_images)
val_labels = np.array(val_labels)
# Train the model and save the training history
history = model.fit(
train_images, train_labels, epochs=10, validation_data=(val_images, val_labels)
)
# Save the model weights
model.save_weights("object_detection_model_weights.h5")
# Save the training history to a file
with open("training_history.txt", "w") as file:
file.write(str(history.history))
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Train the model and save the training history
history = model.fit(
train_images, train_labels, epochs=10, validation_data=(val_images, val_labels)
)
# Save the model weights
model.save_weights("object_detection_model_weights.h5")
# Save the training history to a file
with open("training_history.txt", "w") as file:
file.write(str(history.history))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561202.ipynb
|
stenosd
|
aimanghrab
|
[{"Id": 129561202, "ScriptId": 38509747, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14561628, "CreationDate": "05/14/2023 21:04:14", "VersionNumber": 1.0, "Title": "notebookb9ca368178", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 238.0, "LinesInsertedFromPrevious": 238.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185745024, "KernelVersionId": 129561202, "SourceDatasetVersionId": 5682369}]
|
[{"Id": 5682369, "DatasetId": 3266733, "DatasourceVersionId": 5757933, "CreatorUserId": 14561628, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:39:49", "VersionNumber": 1.0, "Title": "stenosD", "Slug": "stenosd", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3266733, "CreatorUserId": 14561628, "OwnerUserId": 14561628.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682369.0, "CurrentDatasourceVersionId": 5757933.0, "ForumId": 3332349, "Type": 2, "CreationDate": "05/14/2023 11:39:49", "LastActivityDate": "05/14/2023", "TotalViews": 48, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 0}]
|
[{"Id": 14561628, "UserName": "aimanghrab", "DisplayName": "aiman ghrab", "RegisterDate": "04/09/2023", "PerformanceTier": 0}]
|
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
def read_xml_annotation(xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
boxes = []
for obj in root.iter("object"):
xmin = int(obj.find("bndbox/xmin").text)
xmax = int(obj.find("bndbox/xmax").text)
ymin = int(obj.find("bndbox/ymin").text)
ymax = int(obj.find("bndbox/ymax").text)
boxes.append([xmin, xmax, ymin, ymax])
return np.array(boxes)
def load_image_and_annotation(image_path, annotation_dir):
img = cv2.imread(image_path)
annotation_file = os.path.join(
annotation_dir, os.path.splitext(os.path.basename(image_path))[0] + ".xml"
)
boxes = read_xml_annotation(annotation_file)
return img, boxes
def preprocess_image(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image / 255.0
return image
image_dir = "/kaggle/input/stenosd/dataset"
annotation_dir = "/kaggle/input/stenosd/dataset"
batch_size = 32
num_epochs = 10
learning_rate = 0.001
def generate_training_data(image_dir, annotation_dir):
image_paths = glob.glob(os.path.join(image_dir, "*.bmp"))
for i in range(0, len(image_paths), batch_size):
images = []
boxes_list = []
for image_path in image_paths[i : i + batch_size]:
img, boxes = load_image_and_annotation(image_path, annotation_dir)
images.append(preprocess_image(img))
boxes_list.append(boxes)
yield np.array(images), np.array(boxes_list)
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
def create_model(num_classes):
detection_model = tf.saved_model.load("path_to_pretrained_model")
return detection_model
def detect_objects(image, model):
input_tensor = tf.convert_to_tensor(image)
input_tensor = input_tensor[tf.newaxis, ...]
detections = model(input_tensor)
num_detections = int(detections.pop("num_detections"))
detections = {
key: value[0, :num_detections].numpy() for key, value in detections.items()
}
detections["num_detections"] = num_detections
return detections
def visualize_detections(image, detections, label_map_path):
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path, use_display_name=True
)
vis_util.visualize_boxes_and_labels_on_image_array
def visualize_detections(image, detections, label_map_path):
category_index = label_map_util.create_category_index_from_labelmap(
label_map_path, use_display_name=True
)
vis_util.visualize_boxes_and_labels_on_image_array(
image,
detections["detection_boxes"],
detections["detection_classes"].astype(np.int64),
detections["detection_scores"],
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.5,
)
plt.figure(figsize=(12, 8))
plt.imshow(image)
plt.axis("off")
plt.show()
import tensorflow as tf
def create_object_detection_model():
inputs = tf.keras.Input(shape=(None, None, 3)) # Variable input size
backbone = tf.keras.applications.ResNet50(
include_top=False, weights="imagenet"
) # Pretrained backbone
backbone.trainable = True
# Feature extraction
backbone_outputs = backbone(inputs, training=True)
# Additional convolutional layers for object detection
x = layers.Conv2D(256, (3, 3), activation="relu")(backbone_outputs)
x = layers.Conv2D(256, (3, 3), activation="relu")(x)
x = layers.Conv2D(256, (3, 3), activation="relu")(x)
# Localization head
localization = layers.Conv2D(4, (3, 3), activation="linear", name="localization")(x)
# Classification head
classification = layers.Conv2D(
1, (3, 3), activation="sigmoid", name="classification"
)(x)
# Define the model
model = tf.keras.Model(inputs=inputs, outputs=[localization, classification])
return model
from tensorflow.keras import layers
model = create_object_detection_model()
# Define the loss functions
localization_loss = tf.keras.losses.MeanSquaredError()
classification_loss = tf.keras.losses.BinaryCrossentropy()
# Define the metrics
metrics = [tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.MeanIoU(num_classes=2)]
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Compile the model
model.compile(
optimizer=optimizer, loss=[localization_loss, classification_loss], metrics=metrics
)
import os
import glob
import random
from sklearn.model_selection import train_test_split
# Set the directory where your dataset is located
dataset_dir = "/kaggle/input/stenosd/dataset"
# Get the paths of all image files in the dataset directory
image_paths = glob.glob(os.path.join(dataset_dir, "*.bmp"))
# Split the dataset into training and validation sets
train_image_paths, val_image_paths = train_test_split(
image_paths, test_size=0.2, random_state=42
)
# Define a function to load an image and its corresponding annotation
def load_image_and_annotation(image_path):
    annotation_path = image_path.replace(".bmp", ".xml")
    # Load the image and parse its matching XML annotation using read_xml_annotation defined above
    img = cv2.imread(image_path)
    boxes = read_xml_annotation(annotation_path)
    return img, boxes
# Load training images and annotations
train_images = []
train_labels = []
for image_path in train_image_paths:
img, boxes = load_image_and_annotation(image_path)
train_images.append(preprocess_image(img))
train_labels.append(boxes)
# Load validation images and annotations
val_images = []
val_labels = []
for image_path in val_image_paths:
img, boxes = load_image_and_annotation(image_path)
val_images.append(preprocess_image(img))
val_labels.append(boxes)
# Convert the lists to NumPy arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
val_images = np.array(val_images)
val_labels = np.array(val_labels)
# Train the model and save the training history
history = model.fit(
train_images, train_labels, epochs=10, validation_data=(val_images, val_labels)
)
# Save the model weights
model.save_weights("object_detection_model_weights.h5")
# Save the training history to a file
with open("training_history.txt", "w") as file:
file.write(str(history.history))
import tensorflow as tf
from sklearn.model_selection import train_test_split
# Train the model and save the training history
history = model.fit(
train_images, train_labels, epochs=10, validation_data=(val_images, val_labels)
)
# Save the model weights
model.save_weights("object_detection_model_weights.h5")
# Save the training history to a file
with open("training_history.txt", "w") as file:
file.write(str(history.history))
| false | 0 | 2,028 | 0 | 2,048 | 2,028 |
||
129516668
|
<jupyter_start><jupyter_text>Bank Customer Segmentation (1M+ Transactions)
### Bank Customer Segmentation
Most banks have a large customer base - with different characteristics in terms of age, income, values, lifestyle, and more. Customer segmentation is the process of dividing a customer dataset into specific groups based on shared traits.
*According to a report from Ernst & Young, “A more granular understanding of consumers is no longer a nice-to-have item, but a strategic and competitive imperative for banking providers. Customer understanding should be a living, breathing part of everyday business, with insights underpinning the full range of banking operations.*
### About this Dataset
This dataset consists of 1 Million+ transaction by over 800K customers for a bank in India. The data contains information such as - customer age (DOB), location, gender, account balance at the time of the transaction, transaction details, transaction amount, etc.
### Interesting Analysis Ideas
The dataset can be used for different analysis, example -
1. Perform Clustering / Segmentation on the dataset and identify popular customer groups along with their definitions/rules
2. Perform Location-wise analysis to identify regional trends in India
3. Perform transaction-related analysis to identify interesting trends that can be used by a bank to improve / optimi their user experiences
4. Customer Recency, Frequency, Monetary analysis
5. Network analysis or Graph analysis of customer data.
Kaggle dataset identifier: bank-customer-segmentation
<jupyter_code>import pandas as pd
df = pd.read_csv('bank-customer-segmentation/bank_transactions.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1048567 entries, 0 to 1048566
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 TransactionID 1048567 non-null object
1 CustomerID 1048567 non-null object
2 CustomerDOB 1045170 non-null object
3 CustGender 1047467 non-null object
4 CustLocation 1048416 non-null object
5 CustAccountBalance 1046198 non-null float64
6 TransactionDate 1048567 non-null object
7 TransactionTime 1048567 non-null int64
8 TransactionAmount (INR) 1048567 non-null float64
dtypes: float64(2), int64(1), object(6)
memory usage: 72.0+ MB
<jupyter_text>Examples:
{
"TransactionID": "T1",
"CustomerID": "C5841053",
"CustomerDOB": "10/1/94",
"CustGender": "F",
"CustLocation": "JAMSHEDPUR",
"CustAccountBalance": 17819.05,
"TransactionDate": "2/8/16",
"TransactionTime": 143207,
"TransactionAmount (INR)": 25
}
{
"TransactionID": "T2",
"CustomerID": "C2142763",
"CustomerDOB": "4/4/57",
"CustGender": "M",
"CustLocation": "JHAJJAR",
"CustAccountBalance": 2270.69,
"TransactionDate": "2/8/16",
"TransactionTime": 141858,
"TransactionAmount (INR)": 27999
}
{
"TransactionID": "T3",
"CustomerID": "C4417068",
"CustomerDOB": "26/11/96",
"CustGender": "F",
"CustLocation": "MUMBAI",
"CustAccountBalance": 17874.44,
"TransactionDate": "2/8/16",
"TransactionTime": 142712,
"TransactionAmount (INR)": 459
}
{
"TransactionID": "T4",
"CustomerID": "C5342380",
"CustomerDOB": "14/9/73",
"CustGender": "F",
"CustLocation": "MUMBAI",
"CustAccountBalance": 866503.21,
"TransactionDate": "2/8/16",
"TransactionTime": 142714,
"TransactionAmount (INR)": 2060
}
<jupyter_script># # Customer Segmentation on Bank Customers
# ### The project will look into the demographic attributes of the bank's customers and conduct segmentation based on their lifecycle and value to the bank. The methods used in the project:
# ### 1. RFM Model: Recency, Frequency, Monetary Scores
# ### 2. Customer Lifecycle: New Customer, Active Customer, Non-active Customer, Returning Customer
# ### 3. Pareto Analysis: how many customers contribute to the most transaction volume
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Ban the scientific expression
pd.set_option("display.float_format", lambda x: "%.2f" % x)
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### 1. Data Cleaning
# Import the data
data = pd.read_csv("/kaggle/input/bank-customer-segmentation/bank_transactions.csv")
data.info()
data.sample(5)
# 1. Check for missing values
data.isnull().sum() / data.shape[0] * 100
# From the results, the missing values only take less than 1% of the total records. Therefore we can drop them
data.dropna(axis=0, inplace=True)
# 2. Check for duplications: a customer can appear more than once since one customer can make multiple transactions, but the transaction ID should be unique
data.duplicated(subset="TransactionID").sum()
# There is no duplication in the transaction id part.
# 3. Check the distribution of the numeric fields (for potential outliers)
data["TransactionDate"] = pd.to_datetime(data["TransactionDate"])
data["CustomerDOB"] = pd.to_datetime(data["CustomerDOB"])
data[
[
"CustAccountBalance",
"TransactionAmount (INR)",
"CustomerDOB",
"CustGender",
"CustLocation",
"TransactionDate",
"TransactionTime",
]
].describe(percentiles=[0.01, 0.25, 0.50, 0.75, 0.99], include="all").T
# From the results, the numerical variables (balance and transaction amount) follow a right-skewed distribution (mean > median), which is likely driven by high-net-worth customers
# For the date and categorical variables, some dates carry wrong values (e.g. 1800-01-01) and will need to be adjusted.
# The transaction time can be dropped since it does not seem to contain useful information for this analysis
# 4. Data Transformation
# 4.1 Drop unused fields
data.drop("TransactionTime", axis=1, inplace=True)
# 4.2 Calculate Customer Age
# Here will use the year in the data (2016) as base to get the customer's age
data["age"] = data["TransactionDate"].dt.year - data["CustomerDOB"].dt.year
# 4.3 Replace ages below 12 or at/above 100 with the median age
data.loc[(data["age"] < 12) | (data["age"] >= 100), "age"] = data["age"].median()
# 4.4 Adjust the values of Gender
data["CustGender"] = data["CustGender"].replace(
{"M": "Male", "F": "Female", "T": "Male"}
)
# ### 2. Exploratory Data Analysis (EDA)
# #### 2.1 Gender
# Compare the distribution of customers across genders
plt.style.use("ggplot")
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
ax[0].pie(
data["CustGender"].value_counts(), autopct="%1.f%%", labels=["Male", "Female"]
)
ax[0].set_title("Customer Gender Frequency", size=20)
ax[1] = sns.distplot(
data.loc[
(data["CustGender"] == "Male")
& (
data["TransactionAmount (INR)"]
< np.percentile(data["TransactionAmount (INR)"], 90)
),
"TransactionAmount (INR)",
],
    label="Male",
kde=False,
)
ax[1] = sns.distplot(
data.loc[
(data["CustGender"] == "Female")
& (
data["TransactionAmount (INR)"]
< np.percentile(data["TransactionAmount (INR)"], 90)
),
"TransactionAmount (INR)",
],
label="Female",
kde=False,
)
ax[1].set_title("Transaction Amount by Customer Gender", size=20)
# #### 2.2 Location
# Select the top 20 cities with most transactions happened
plt.figure(figsize=(20, 6))
sns.countplot(
y="CustLocation", data=data, order=data["CustLocation"].value_counts()[:20].index
)
plt.title("Top 20 Locations of Customer ", fontsize="20")
# From the plot, Mumbai, New Delhi and Bangalore are the top 3 cities by number of transactions. This is likely because these cities have larger populations, stronger economies and higher salary ranges.
# #### 2.3 Age
# Distribution of age based on bins
bins = [0, 20, 30, 40, 50, 60, 100]
labels = ["0 - 20", "20 - 30", "30 - 40", "40 - 50", "50 - 60", "60+"]
data["age_bin"] = pd.cut(x=data["age"], bins=bins, labels=labels, right=True)
plt.figure(figsize=(20, 6))
sns.countplot(data, y="age_bin", order=data["age_bin"].value_counts().index)
# From the plot, the 20 - 30 group is the majority, followed by the 30 - 40 group. There are far fewer transactions from customers above 50.
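# The project intro also promises a Pareto analysis ("how many customers contribute to
# the most transaction volume"), which this excerpt does not reach. A minimal sketch of
# that check on the cleaned data is given below; the 80% cut-off is the usual
# rule-of-thumb assumption, not a figure taken from the source.
customer_amount = (
    data.groupby("CustomerID")["TransactionAmount (INR)"]
    .sum()
    .sort_values(ascending=False)
)
cumulative_share = customer_amount.cumsum() / customer_amount.sum()
share_of_customers = (cumulative_share <= 0.8).mean()
print(
    f"Roughly {share_of_customers:.1%} of customers account for 80% of total transaction volume"
)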
# ### 3. Customer Segmentation
# #### 3.1 RFM Segmentation
# #### RFM model is commonly used in marketing to segment customers based on their shopping behaviours, then treat each segment with targeted strategies. The three metrics used in the segmentation are:
# #### 1) Recency: how many days since the customer's last transaction date? The lower the value, the more loyal the customer is to our firm;
# #### 2) Frequency: how many times did the customer make transactions during the period? The higher the value, the more active the customer is with our products and services;
# #### 3) Monetary: the total transaction amount spent by the customer during the period. This is the most important metric in the model. The higher the value, the more monetary value the customer brings to our firm.
# #### Steps of RFM:
# #### Step 1: Calculate the raw value of each metric;
# #### Step 2: Assign a score to each raw value based on its distribution
# #### Step 3: Based on the average score of each metric, decide the class of each customer record (0 or 1, where 1 means qualified and 0 means unqualified)
# #### Step 4: Segment the customers based on their assigned class (0 or 1) for each metric
# #### In practice the time range should be decided by the business team; here we simply use the whole timeframe in the data. We also do not take the account balance into consideration, since actual transactions create more value for the business.
# RFM Modeling Process:
# 1. Step 1 - Calculate the raw value of each metrics:
data_RFM = data.copy()
data_RFM = (
data_RFM.groupby(["CustomerID"])
.agg(
Last_Trans_Date=("TransactionDate", "max"),
M=("TransactionAmount (INR)", "sum"),
F=("TransactionID", "count"),
)
.reset_index()
)
# Recency (R) Calculation: use the last transaction date in the data as base, then calculate the date difference between each customer's last transaction date to the base
data_RFM["R"] = data_RFM["Last_Trans_Date"].apply(
lambda x: data_RFM["Last_Trans_Date"].max() - x
)
data_RFM["R"] = data_RFM["Last_Trans_Date"].dt.days
data_RFM = data_RFM[["CustomerID", "R", "F", "M"]]
data_RFM.head()
data_RFM.head()
# Step 2 - Assign a score to each raw value based on its distribution
# There is no strict standard for the choice of scoring bins or score range; it depends on the business case and the marketing requirements
# Here we take a look at the distribution of each metric
plt.style.use("ggplot")
fig, ax = plt.subplots(ncols=1, nrows=3, figsize=(6, 20))
sns.histplot(data_RFM["R"], kde=False, ax=ax[0])
sns.histplot(data_RFM["F"], kde=False, ax=ax[1])
sns.histplot(data_RFM["M"], kde=False, ax=ax[2])
data_RFM.head()
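# The excerpt ends before Steps 2-4 of the RFM model, so the following is only a
# minimal sketch of how the scoring and segmentation could proceed. The quartile
# binning, the "at or above average" class rule and the segment names are illustrative
# assumptions, not taken from the source. rank(method="first") is used so that qcut
# does not fail on heavily tied values (most customers have only one transaction).
data_RFM["R_score"] = pd.qcut(
    data_RFM["R"].rank(method="first"), q=4, labels=[4, 3, 2, 1]
).astype(int)  # a smaller recency is better, so its labels are reversed
data_RFM["F_score"] = pd.qcut(
    data_RFM["F"].rank(method="first"), q=4, labels=[1, 2, 3, 4]
).astype(int)
data_RFM["M_score"] = pd.qcut(
    data_RFM["M"].rank(method="first"), q=4, labels=[1, 2, 3, 4]
).astype(int)
# Step 3: class 1 ("qualified") if the score is at or above that metric's average score
for col in ["R_score", "F_score", "M_score"]:
    data_RFM[col.replace("_score", "_class")] = (
        data_RFM[col] >= data_RFM[col].mean()
    ).astype(int)
# Step 4: map the three 0/1 classes (R, F, M) to segment names (names are illustrative)
segment_map = {
    (1, 1, 1): "Important value customer",
    (0, 1, 1): "Important retention customer",
    (1, 0, 1): "Important development customer",
    (0, 0, 1): "Important win-back customer",
    (1, 1, 0): "General value customer",
    (0, 1, 0): "General retention customer",
    (1, 0, 0): "General development customer",
    (0, 0, 0): "General win-back customer",
}
data_RFM["segment"] = data_RFM.apply(
    lambda row: segment_map[(row["R_class"], row["F_class"], row["M_class"])], axis=1
)
data_RFM["segment"].value_counts()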
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/516/129516668.ipynb
|
bank-customer-segmentation
|
shivamb
|
[{"Id": 129516668, "ScriptId": 38489547, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1270512, "CreationDate": "05/14/2023 13:17:01", "VersionNumber": 2.0, "Title": "Customer Segmentation - RFM Model Practise", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 171.0, "LinesInsertedFromPrevious": 42.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 129.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185653298, "KernelVersionId": 129516668, "SourceDatasetVersionId": 2743905}]
|
[{"Id": 2743905, "DatasetId": 1672910, "DatasourceVersionId": 2789165, "CreatorUserId": 1571785, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "10/26/2021 13:28:18", "VersionNumber": 1.0, "Title": "Bank Customer Segmentation (1M+ Transactions)", "Slug": "bank-customer-segmentation", "Subtitle": "Customer demographics and transactions data from an Indian Bank", "Description": "### Bank Customer Segmentation\n\nMost banks have a large customer base - with different characteristics in terms of age, income, values, lifestyle, and more. Customer segmentation is the process of dividing a customer dataset into specific groups based on shared traits.\n\n*According to a report from Ernst & Young, \u201cA more granular understanding of consumers is no longer a nice-to-have item, but a strategic and competitive imperative for banking providers. Customer understanding should be a living, breathing part of everyday business, with insights underpinning the full range of banking operations.*\n\n### About this Dataset\n\nThis dataset consists of 1 Million+ transaction by over 800K customers for a bank in India. The data contains information such as - customer age (DOB), location, gender, account balance at the time of the transaction, transaction details, transaction amount, etc. \n\n### Interesting Analysis Ideas \n\nThe dataset can be used for different analysis, example - \n\n1. Perform Clustering / Segmentation on the dataset and identify popular customer groups along with their definitions/rules \n2. Perform Location-wise analysis to identify regional trends in India \n3. Perform transaction-related analysis to identify interesting trends that can be used by a bank to improve / optimi their user experiences \n4. Customer Recency, Frequency, Monetary analysis \n5. Network analysis or Graph analysis of customer data.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1672910, "CreatorUserId": 1571785, "OwnerUserId": 1571785.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2743905.0, "CurrentDatasourceVersionId": 2789165.0, "ForumId": 1694135, "Type": 2, "CreationDate": "10/26/2021 13:28:18", "LastActivityDate": "10/26/2021", "TotalViews": 74434, "TotalDownloads": 6281, "TotalVotes": 86, "TotalKernels": 23}]
|
[{"Id": 1571785, "UserName": "shivamb", "DisplayName": "Shivam Bansal", "RegisterDate": "01/22/2018", "PerformanceTier": 4}]
|
# # Customer Segmentation on Bank Customers
# ### The project will look into the demographic attributes of the bank's customers and conduct segmentation based on their lifecycle and value to the bank. The methods used in the project:
# ### 1. RFM Model: Recency, Frequency, Monetary Scores
# ### 2. Customer Lifecycle: New Customer, Active Customer, Non-active Customer, Returning Customer
# ### 3. Pareto Analysis: how many customers contribute to the most transaction volume
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Ban the scientific expression
pd.set_option("display.float_format", lambda x: "%.2f" % x)
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### 1. Data Cleaning
# Import the data
data = pd.read_csv("/kaggle/input/bank-customer-segmentation/bank_transactions.csv")
data.info()
data.sample(5)
# 1. Check for missing values
data.isnull().sum() / data.shape[0] * 100
# From the results, the missing values only take less than 1% of the total records. Therefore we can drop them
data.dropna(axis=0, inplace=True)
# 2. Check for duplications: a customer can appear more than once since one customer can make multiple transactions, but the transaction ID should be unique
data.duplicated(subset="TransactionID").sum()
# There is no duplication in the transaction id part.
# 3. Check the distribution of the numeric fields (for potential outliers)
data["TransactionDate"] = pd.to_datetime(data["TransactionDate"])
data["CustomerDOB"] = pd.to_datetime(data["CustomerDOB"])
data[
[
"CustAccountBalance",
"TransactionAmount (INR)",
"CustomerDOB",
"CustGender",
"CustLocation",
"TransactionDate",
"TransactionTime",
]
].describe(percentiles=[0.01, 0.25, 0.50, 0.75, 0.99], include="all").T
# From the results, the numerical variables (balance and transaction amount) follow a right-skewed distribution (mean > median), which is likely driven by high-net-worth customers
# For the date and categorical variables, some dates carry wrong values (e.g. 1800-01-01) and will need to be adjusted.
# The transaction time can be dropped since it does not seem to contain useful information for this analysis
# 4. Data Transformation
# 4.1 Drop unused fields
data.drop("TransactionTime", axis=1, inplace=True)
# 4.2 Calculate Customer Age
# Here will use the year in the data (2016) as base to get the customer's age
data["age"] = data["TransactionDate"].dt.year - data["CustomerDOB"].dt.year
# 4.3 Replace ages below 12 or at/above 100 with the median age
data.loc[(data["age"] < 12) | (data["age"] >= 100), "age"] = data["age"].median()
# 4.4 Adjust the values of Gender
data["CustGender"] = data["CustGender"].replace(
{"M": "Male", "F": "Female", "T": "Male"}
)
# ### 2. Exploratory Data Analysis (EDA)
# #### 2.1 Gender
# Compare the distribution of customers across genders
plt.style.use("ggplot")
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
ax[0].pie(
data["CustGender"].value_counts(), autopct="%1.f%%", labels=["Male", "Female"]
)
ax[0].set_title("Customer Gender Frequency", size=20)
ax[1] = sns.distplot(
data.loc[
(data["CustGender"] == "Male")
& (
data["TransactionAmount (INR)"]
< np.percentile(data["TransactionAmount (INR)"], 90)
),
"TransactionAmount (INR)",
],
    label="Male",
kde=False,
)
ax[1] = sns.distplot(
data.loc[
(data["CustGender"] == "Female")
& (
data["TransactionAmount (INR)"]
< np.percentile(data["TransactionAmount (INR)"], 90)
),
"TransactionAmount (INR)",
],
label="Female",
kde=False,
)
ax[1].set_title("Transaction Amount by Customer Gender", size=20)
# #### 2.2 Location
# Select the top 20 cities with most transactions happened
plt.figure(figsize=(20, 6))
sns.countplot(
y="CustLocation", data=data, order=data["CustLocation"].value_counts()[:20].index
)
plt.title("Top 20 Locations of Customer ", fontsize="20")
# From the plot, Mumbai, New Delhi and Bangalore are the top 3 cities by number of transactions. This is likely because these cities have larger populations, stronger economies and higher salary ranges.
# #### 2.3 Age
# Distribution of age based on bins
bins = [0, 20, 30, 40, 50, 60, 100]
labels = ["0 - 20", "20 - 30", "30 - 40", "40 - 50", "50 - 60", "60+"]
data["age_bin"] = pd.cut(x=data["age"], bins=bins, labels=labels, right=True)
plt.figure(figsize=(20, 6))
sns.countplot(data, y="age_bin", order=data["age_bin"].value_counts().index)
# From the plot, the 20 - 30 group is the majority, followed by the 30 - 40 group. There are far fewer transactions from customers above 50.
# ### 3. Customer Segmentation
# #### 3.1 RFM Segmentation
# #### RFM model is commonly used in marketing to segment customers based on their shopping behaviours, then treat each segment with targeted strategies. The three metrics used in the segmentation are:
# #### 1) Recency: how many days since the customer's last transaction date? The lower the value, the more loyal the customer is to our firm;
# #### 2) Frequency: how many times did the customer make transactions during the period? The higher the value, the more active the customer is with our products and services;
# #### 3) Monetary: the total transaction amount spent by the customer during the period. This is the most important metric in the model. The higher the value, the more monetary value the customer brings to our firm.
# #### Steps of RFM:
# #### Step 1: Calculate the raw value of each metric;
# #### Step 2: Assign a score to each raw value based on its distribution
# #### Step 3: Based on the average score of each metric, decide the class of each customer record (0 or 1, where 1 means qualified and 0 means unqualified)
# #### Step 4: Segment the customers based on their assigned class (0 or 1) for each metric
# #### In practice the time range should be decided by the business team; here we simply use the whole timeframe in the data. We also do not take the account balance into consideration, since actual transactions create more value for the business.
# RFM Modeling Process:
# 1. Step 1 - Calculate the raw value of each metrics:
data_RFM = data.copy()
data_RFM = (
data_RFM.groupby(["CustomerID"])
.agg(
Last_Trans_Date=("TransactionDate", "max"),
M=("TransactionAmount (INR)", "sum"),
F=("TransactionID", "count"),
)
.reset_index()
)
# Recency (R) Calculation: use the last transaction date in the data as base, then calculate the date difference between each customer's last transaction date to the base
data_RFM["R"] = data_RFM["Last_Trans_Date"].apply(
lambda x: data_RFM["Last_Trans_Date"].max() - x
)
data_RFM["R"] = data_RFM["Last_Trans_Date"].dt.days
data_RFM = data_RFM[["CustomerID", "R", "F", "M"]]
data_RFM.head()
data_RFM.head()
# Step 2 - Assign a score to each raw value based on its distribution
# There is no strict standard for the choice of scoring bins or score range; it depends on the business case and the marketing requirements
# Here we take a look at the distribution of each metric
plt.style.use("ggplot")
fig, ax = plt.subplots(ncols=1, nrows=3, figsize=(6, 20))
sns.histplot(data_RFM["R"], kde=False, ax=ax[0])
sns.histplot(data_RFM["F"], kde=False, ax=ax[1])
sns.histplot(data_RFM["M"], kde=False, ax=ax[2])
data_RFM.head()
|
[{"bank-customer-segmentation/bank_transactions.csv": {"column_names": "[\"TransactionID\", \"CustomerID\", \"CustomerDOB\", \"CustGender\", \"CustLocation\", \"CustAccountBalance\", \"TransactionDate\", \"TransactionTime\", \"TransactionAmount (INR)\"]", "column_data_types": "{\"TransactionID\": \"object\", \"CustomerID\": \"object\", \"CustomerDOB\": \"object\", \"CustGender\": \"object\", \"CustLocation\": \"object\", \"CustAccountBalance\": \"float64\", \"TransactionDate\": \"object\", \"TransactionTime\": \"int64\", \"TransactionAmount (INR)\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1048567 entries, 0 to 1048566\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 TransactionID 1048567 non-null object \n 1 CustomerID 1048567 non-null object \n 2 CustomerDOB 1045170 non-null object \n 3 CustGender 1047467 non-null object \n 4 CustLocation 1048416 non-null object \n 5 CustAccountBalance 1046198 non-null float64\n 6 TransactionDate 1048567 non-null object \n 7 TransactionTime 1048567 non-null int64 \n 8 TransactionAmount (INR) 1048567 non-null float64\ndtypes: float64(2), int64(1), object(6)\nmemory usage: 72.0+ MB\n", "summary": "{\"CustAccountBalance\": {\"count\": 1046198.0, \"mean\": 115403.54005622261, \"std\": 846485.3806006602, \"min\": 0.0, \"25%\": 4721.76, \"50%\": 16792.18, \"75%\": 57657.36, \"max\": 115035495.1}, \"TransactionTime\": {\"count\": 1048567.0, \"mean\": 157087.52939297154, \"std\": 51261.85402232933, \"min\": 0.0, \"25%\": 124030.0, \"50%\": 164226.0, \"75%\": 200010.0, \"max\": 235959.0}, \"TransactionAmount (INR)\": {\"count\": 1048567.0, \"mean\": 1574.3350034570992, \"std\": 6574.742978454002, \"min\": 0.0, \"25%\": 161.0, \"50%\": 459.03, \"75%\": 1200.0, \"max\": 1560034.99}}", "examples": "{\"TransactionID\":{\"0\":\"T1\",\"1\":\"T2\",\"2\":\"T3\",\"3\":\"T4\"},\"CustomerID\":{\"0\":\"C5841053\",\"1\":\"C2142763\",\"2\":\"C4417068\",\"3\":\"C5342380\"},\"CustomerDOB\":{\"0\":\"10\\/1\\/94\",\"1\":\"4\\/4\\/57\",\"2\":\"26\\/11\\/96\",\"3\":\"14\\/9\\/73\"},\"CustGender\":{\"0\":\"F\",\"1\":\"M\",\"2\":\"F\",\"3\":\"F\"},\"CustLocation\":{\"0\":\"JAMSHEDPUR\",\"1\":\"JHAJJAR\",\"2\":\"MUMBAI\",\"3\":\"MUMBAI\"},\"CustAccountBalance\":{\"0\":17819.05,\"1\":2270.69,\"2\":17874.44,\"3\":866503.21},\"TransactionDate\":{\"0\":\"2\\/8\\/16\",\"1\":\"2\\/8\\/16\",\"2\":\"2\\/8\\/16\",\"3\":\"2\\/8\\/16\"},\"TransactionTime\":{\"0\":143207,\"1\":141858,\"2\":142712,\"3\":142714},\"TransactionAmount (INR)\":{\"0\":25.0,\"1\":27999.0,\"2\":459.0,\"3\":2060.0}}"}}]
| true | 1 |
<start_data_description><data_path>bank-customer-segmentation/bank_transactions.csv:
<column_names>
['TransactionID', 'CustomerID', 'CustomerDOB', 'CustGender', 'CustLocation', 'CustAccountBalance', 'TransactionDate', 'TransactionTime', 'TransactionAmount (INR)']
<column_types>
{'TransactionID': 'object', 'CustomerID': 'object', 'CustomerDOB': 'object', 'CustGender': 'object', 'CustLocation': 'object', 'CustAccountBalance': 'float64', 'TransactionDate': 'object', 'TransactionTime': 'int64', 'TransactionAmount (INR)': 'float64'}
<dataframe_Summary>
{'CustAccountBalance': {'count': 1046198.0, 'mean': 115403.54005622261, 'std': 846485.3806006602, 'min': 0.0, '25%': 4721.76, '50%': 16792.18, '75%': 57657.36, 'max': 115035495.1}, 'TransactionTime': {'count': 1048567.0, 'mean': 157087.52939297154, 'std': 51261.85402232933, 'min': 0.0, '25%': 124030.0, '50%': 164226.0, '75%': 200010.0, 'max': 235959.0}, 'TransactionAmount (INR)': {'count': 1048567.0, 'mean': 1574.3350034570992, 'std': 6574.742978454002, 'min': 0.0, '25%': 161.0, '50%': 459.03, '75%': 1200.0, 'max': 1560034.99}}
<dataframe_info>
RangeIndex: 1048567 entries, 0 to 1048566
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 TransactionID 1048567 non-null object
1 CustomerID 1048567 non-null object
2 CustomerDOB 1045170 non-null object
3 CustGender 1047467 non-null object
4 CustLocation 1048416 non-null object
5 CustAccountBalance 1046198 non-null float64
6 TransactionDate 1048567 non-null object
7 TransactionTime 1048567 non-null int64
8 TransactionAmount (INR) 1048567 non-null float64
dtypes: float64(2), int64(1), object(6)
memory usage: 72.0+ MB
<some_examples>
{'TransactionID': {'0': 'T1', '1': 'T2', '2': 'T3', '3': 'T4'}, 'CustomerID': {'0': 'C5841053', '1': 'C2142763', '2': 'C4417068', '3': 'C5342380'}, 'CustomerDOB': {'0': '10/1/94', '1': '4/4/57', '2': '26/11/96', '3': '14/9/73'}, 'CustGender': {'0': 'F', '1': 'M', '2': 'F', '3': 'F'}, 'CustLocation': {'0': 'JAMSHEDPUR', '1': 'JHAJJAR', '2': 'MUMBAI', '3': 'MUMBAI'}, 'CustAccountBalance': {'0': 17819.05, '1': 2270.69, '2': 17874.44, '3': 866503.21}, 'TransactionDate': {'0': '2/8/16', '1': '2/8/16', '2': '2/8/16', '3': '2/8/16'}, 'TransactionTime': {'0': 143207, '1': 141858, '2': 142712, '3': 142714}, 'TransactionAmount (INR)': {'0': 25.0, '1': 27999.0, '2': 459.0, '3': 2060.0}}
<end_description>
| 2,402 | 0 | 3,525 | 2,402 |
129516007
|
<jupyter_start><jupyter_text>Heights and weights
### Context
This data set gives average masses for women as a function of their height in a sample of American women of age 30–39.
### Content
The data contains the variables
Height (m)
Weight (kg)
Kaggle dataset identifier: heights-and-weights
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/heights-and-weights/data.csv")
df.head(5) # first five values
# # description and the info of the data set.
print("Dimensions of the dataset:", df.shape)
print("\nColumn names and data types:")
print(df.dtypes)
print("\nSummary statistics:")
print(df.describe())
print("\nMissing values:")
print(df.isnull().sum())
df.info()
# # Linear Regression work.
# ## defining the x and y axes
x_axis_val = df["Height"].values
x_axis_val
y_axis_val = df["Weight"].values
y_axis_val
from matplotlib import pyplot as plt
# ## plot of the original full_data
plt.scatter(x_axis_val, y_axis_val, color="black")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
x = x_axis_val.reshape(-1, 1)
x, len(x)
from sklearn.model_selection import train_test_split
# ## split the data into training and test sets for the linear regression
x_train, x_test, y_train, y_test = train_test_split(
x, y_axis_val, train_size=0.7, random_state=150
)
x_train, len(x_train)
y_train, len(y_train)
# ## plot of the training sample
plt.scatter(x_train, y_train, color="black")
plt.xlabel("Height_train_sample")
plt.ylabel("Weight_train_sample")
plt.plot
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
lr.score(x_test, y_test) * 100
y_predict = lr.predict(x_test)
y_predict
y_test # to compare the predict and test values
# ## plot of the training sample(in black) with the predicted values for the test sample(in red)
plt.scatter(x_train, y_train, color="black")
plt.scatter(x_test, y_predict, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## three conclusions from my regression model.
# ## 1. height vs weight with train size of 60% and random state of 150
x_train_1, x_test_1, y_train_1, y_test_1 = train_test_split(
x, y_axis_val, train_size=0.6, random_state=150
)
lr_1 = LinearRegression()
lr_1.fit(x_train_1, y_train_1)
lr_1.score(x_test_1, y_test_1) * 100
y_predict_1 = lr_1.predict(x_test_1)
plt.scatter(x_train_1, y_train_1, color="black")
plt.plot(x_test_1, y_predict_1, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## 2. height vs weight with train size of 15% and random state of 150
x_train_2, x_test_2, y_train_2, y_test_2 = train_test_split(
x, y_axis_val, train_size=0.15, random_state=150
)
lr_2 = LinearRegression()
lr_2.fit(x_train_2, y_train_2)
lr_2.score(x_test_2, y_test_2) * 100
y_predict_2 = lr_2.predict(x_test_2)
plt.scatter(x_train_2, y_train_2, color="black")
plt.plot(x_test_2, y_predict_2, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## 3. height vs weight with train size of 90% and random state of 150
x_train_3, x_test_3, y_train_3, y_test_3 = train_test_split(
x, y_axis_val, train_size=0.9, random_state=150
)
lr_3 = LinearRegression()
lr_3.fit(x_train_3, y_train_3)
lr_3.score(x_test_3, y_test_3) * 100
y_predict_3 = lr_3.predict(x_test_3)
plt.scatter(x_train_3, y_train_3, color="black")
plt.plot(x_test_3, y_predict_3, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/516/129516007.ipynb
|
heights-and-weights
|
tmcketterick
|
[{"Id": 129516007, "ScriptId": 38504956, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14803463, "CreationDate": "05/14/2023 13:11:09", "VersionNumber": 1.0, "Title": "Linear Regression", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 150.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185651620, "KernelVersionId": 129516007, "SourceDatasetVersionId": 12327}]
|
[{"Id": 12327, "DatasetId": 8834, "DatasourceVersionId": 12327, "CreatorUserId": 930751, "LicenseName": "CC0: Public Domain", "CreationDate": "01/06/2018 21:42:20", "VersionNumber": 1.0, "Title": "Heights and weights", "Slug": "heights-and-weights", "Subtitle": "Simple linear regression", "Description": "### Context\n\nThis data set gives average masses for women as a function of their height in a sample of American women of age 30\u201339.\n\n\n### Content\n\nThe data contains the variables\n\nHeight (m) \nWeight (kg)\n\n\n### Acknowledgements\n\nhttps://en.wikipedia.org/wiki/Simple_linear_regression", "VersionNotes": "Initial release", "TotalCompressedBytes": 189.0, "TotalUncompressedBytes": 189.0}]
|
[{"Id": 8834, "CreatorUserId": 930751, "OwnerUserId": 930751.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 12327.0, "CurrentDatasourceVersionId": 12327.0, "ForumId": 16020, "Type": 2, "CreationDate": "01/06/2018 21:42:20", "LastActivityDate": "01/31/2018", "TotalViews": 47661, "TotalDownloads": 8113, "TotalVotes": 72, "TotalKernels": 87}]
|
[{"Id": 930751, "UserName": "tmcketterick", "DisplayName": "T McKetterick", "RegisterDate": "02/26/2017", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/heights-and-weights/data.csv")
df.head(5) # first five values
# # description and the info of the data set.
print("Dimensions of the dataset:", df.shape)
print("\nColumn names and data types:")
print(df.dtypes)
print("\nSummary statistics:")
print(df.describe())
print("\nMissing values:")
print(df.isnull().sum())
df.info()
# # Linear Regression work.
# ## defining the x and y axes
x_axis_val = df["Height"].values
x_axis_val
y_axis_val = df["Weight"].values
y_axis_val
from matplotlib import pyplot as plt
# ## plot of the original full_data
plt.scatter(x_axis_val, y_axis_val, color="black")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
x = x_axis_val.reshape(-1, 1)
x, len(x)
from sklearn.model_selection import train_test_split
# ## split the data into training and test sets for the linear regression
x_train, x_test, y_train, y_test = train_test_split(
x, y_axis_val, train_size=0.7, random_state=150
)
x_train, len(x_train)
y_train, len(y_train)
# ## plot of the training sample
plt.scatter(x_train, y_train, color="black")
plt.xlabel("Height_train_sample")
plt.ylabel("Weight_train_sample")
plt.plot
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
lr.score(x_test, y_test) * 100
y_predict = lr.predict(x_test)
y_predict
y_test # to compare the predict and test values
# ## plot of the training sample(in black) with the predicted values for the test sample(in red)
plt.scatter(x_train, y_train, color="black")
plt.scatter(x_test, y_predict, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## three conclusions from my regression model.
# ## 1. height vs weight with train size of 60% and random state of 150
x_train_1, x_test_1, y_train_1, y_test_1 = train_test_split(
x, y_axis_val, train_size=0.6, random_state=150
)
lr_1 = LinearRegression()
lr_1.fit(x_train_1, y_train_1)
lr_1.score(x_test_1, y_test_1) * 100
y_predict_1 = lr_1.predict(x_test_1)
plt.scatter(x_train_1, y_train_1, color="black")
plt.plot(x_test_1, y_predict_1, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## 2. height vs weight with train size of 15% and random state of 150
x_train_2, x_test_2, y_train_2, y_test_2 = train_test_split(
x, y_axis_val, train_size=0.15, random_state=150
)
lr_2 = LinearRegression()
lr_2.fit(x_train_2, y_train_2)
lr_2.score(x_test_2, y_test_2) * 100
y_predict_2 = lr_2.predict(x_test_2)
plt.scatter(x_train_2, y_train_2, color="black")
plt.plot(x_test_2, y_predict_2, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
# ## 3. height vs weight with train size of 90% and random state of 150
x_train_3, x_test_3, y_train_3, y_test_3 = train_test_split(
x, y_axis_val, train_size=0.9, random_state=150
)
lr_3 = LinearRegression()
lr_3.fit(x_train_3, y_train_3)
lr_3.score(x_test_3, y_test_3) * 100
y_predict_3 = lr_3.predict(x_test_3)
plt.scatter(x_train_3, y_train_3, color="black")
plt.plot(x_test_3, y_predict_3, color="red")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.plot
| false | 1 | 1,377 | 0 | 1,459 | 1,377 |
||
129482503
|
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
import os
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import cv2
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
root = "/kaggle/input/image-matching-challenge-2023"
train_label_file = "train_labels.csv"
train_path = "/kaggle/input/image-matching-challenge-2023/train"
test_path = "/kaggle/input/image-matching-challenge-2023/test"
def get_datasets(root, path):
file_path = os.path.join(root, path)
df = pd.read_csv(file_path)
return df
get_datasets(train_path, train_label_file).head()
train_transform = A.Compose(
[
A.Resize(224, 224),
A.Normalize(
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
max_pixel_value=255.0,
always_apply=False,
p=1.0,
),
ToTensorV2(),
]
)
validation_transform = A.Compose(
[
A.Resize(224, 224),
A.Normalize(
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
max_pixel_value=255.0,
always_apply=False,
p=1.0,
),
ToTensorV2(),
]
)
print("Follwing classes are there : \n", train_dataset.classes)
def display_image(image, label):
plt.imshow(image.permute(1, 2, 0))
display_image(*train_dataset[2])
class ImageDataset(Dataset):
def __init__(
self, root, img_path, rotation_label, translation_label, transforms=None
):
self.root = root
self.img_path = img_path
self.rotation_label = rotation_label
self.translation_label = translation_label
self.transforms = transforms
def __getitem__(self, index):
img_path = self.img_path[index]
img_path = self.root + img_path
image = cv2.imread(img_path)
if self.transforms is not None:
image = self.transforms(image=image)["image"]
rotation_label = self.rotation_label[index].split(";")
rotation_label = list(map(float, rotation_label))
translation_label = self.translation_label[index].split(";")
translation_label = list(map(float, translation_label))
return image, np.array(rotation_label), np.array(translation_label)
def __len__(self):
return len(self.img_path)
def get_train_validation_set(df):
    # Hold out 30% of the images for validation; the remainder is used for training
    validation_data = df.sample(frac=0.3)
    training_data = df[~df["image_path"].isin(validation_data["image_path"])]
    return training_data, validation_data
training_data, validation_data = get_train_validation_set(
get_datasets(train_path, train_label_file)
)
def get_translation_rotation(df):
df["rotation_matrix_split"] = df.apply(
lambda x: list(map(float, x["rotation_matrix"].split(";"))), axis=1
)
df["translation_vector_split"] = df.apply(
lambda x: list(map(float, x["translation_vector"].split(";"))), axis=1
)
rotation_value = np.array(df["rotation_matrix_split"].tolist())
translation_value = np.array(df["translation_vector_split"].tolist())
return translation_value, rotation_value
translation_value, rotation_value = get_translation_rotation(
get_datasets(train_path, train_label_file)
)
def get_train_dataset(train_path, training_data):
train_dataset = ImageDataset(
train_path,
training_data["image_path"].tolist(),
training_data["rotation_matrix"].tolist(),
training_data["translation_vector"].tolist(),
transforms=train_transform,
)
return train_dataset
def get_validation_dataset(train_path, validation_data):
validation_dataset = ImageDataset(
train_path,
validation_data["image_path"].tolist(),
validation_data["rotation_matrix"].tolist(),
validation_data["translation_vector"].tolist(),
transforms=validation_transform,
)
return validation_dataset
def get_dataloader(train_dataset, validation_dataset):
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2)
validation_loader = DataLoader(validation_dataset, batch_size=4, shuffle=False)
return train_loader, validation_loader
train_loader, validation_loader = get_dataloader(
get_train_dataset(train_path, training_data),
get_validation_dataset(train_path, validation_data),
)
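# A quick sanity-check sketch (added for illustration): pull one batch from the training
# loader and confirm the tensor shapes the network below will receive.
sample_imgs, sample_rot, sample_trans = next(iter(train_loader))
print("image batch shape:", sample_imgs.shape)  # expected: (4, 3, 224, 224)
print("rotation label batch shape:", sample_rot.shape)  # expected: (4, 9)
print("translation label batch shape:", sample_trans.shape)  # expected: (4, 3)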
class ImageMatchingModel(torch.nn.Module):
    def __init__(self, dropout=0.2):
        super(ImageMatchingModel, self).__init__()
self.layer_one = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(dropout),
nn.MaxPool2d(kernel_size=2),
)
        self.layer_two = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout(dropout),
nn.MaxPool2d(kernel_size=2),
)
self.layer_three = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(dropout),
nn.MaxPool2d(kernel_size=2),
)
self.layer_four = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Dropout(dropout),
nn.MaxPool2d(kernel_size=2),
)
self.layer_five = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Dropout(dropout),
nn.MaxPool2d(kernel_size=2),
nn.AvgPool2d(kernel_size=7),
)
self.layer_six = nn.Sequential(
nn.Linear(512, 512),
nn.ReLU(),
)
self.rotation_out = nn.Linear(512, 9)
self.tanh = torch.nn.Tanh()
self.translation_out = torch.nn.Linear(512, 3)
def forward(self, x):
x = self.layer_one(x)
x = self.layer_two(x)
x = self.layer_three(x)
x = self.layer_four(x)
x = self.layer_five(x)
x = x.view(-1, 512)
x = self.layer_six(x)
rotation_out = self.rotation_out(x)
rotation_out = self.tanh(rotation_out)
translation_out = self.translation_out(x)
return rotation_out, translation_out
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = ImageMatchingModel(dropout=0.2)
model.to(device)
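# Illustration only (not in the original cell): report the size of the network so the
# training cost below is easier to anticipate.
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters: {n_params:,}")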
l1_distance = torch.nn.L1Loss()
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode="min",  # scheduler.step() below is fed a validation loss, so lower is better
    factor=0.5,
    patience=2,
    threshold_mode="abs",
    min_lr=1e-8,
    verbose=True,
)
best_loss = 1000000000
epochs = 10
best_model = None
for epoch in range(1, epochs + 1):
train_loss = []
rot_loss = []
trans_loss = []
val_loss = []
val_rot_loss = []
val_trans_loss = []
for imgs, rotation_labels, translation_labels in tqdm(train_loader):
model.train()
optimizer.zero_grad()
imgs = imgs.to(device)
rotation_labels = rotation_labels.to(device)
translation_labels = translation_labels.to(device)
rotation_output, translation_output = model(imgs)
rotation_loss = l1_distance(rotation_output, rotation_labels)
translation_loss = l1_distance(translation_output, translation_labels)
loss = rotation_loss + translation_loss
loss.backward()
optimizer.step()
train_loss.append(loss.item())
rot_loss.append(rotation_loss.item())
trans_loss.append(translation_loss.item())
    for imgs, rotation_labels, translation_labels in tqdm(validation_loader):
model.eval()
imgs = imgs.to(device)
rotation_labels = rotation_labels.to(device)
translation_labels = translation_labels.to(device)
rotation_output, translation_output = model(imgs)
rotation_loss = l1_distance(rotation_output, rotation_labels)
translation_loss = l1_distance(translation_output, translation_labels)
loss = rotation_loss + translation_loss
val_loss.append(loss.item())
val_rot_loss.append(rotation_loss.item())
val_trans_loss.append(translation_loss.item())
mtrain_loss = np.mean(train_loss)
mval_loss = np.mean(val_loss)
mtrain_rot_loss = np.mean(rot_loss)
mtrain_trans_loss = np.mean(trans_loss)
mval_rot_loss = np.mean(val_rot_loss)
mval_trans_loss = np.mean(val_trans_loss)
print(
f"Epoch [{epoch}], Train Loss : [{mtrain_loss:.5f}] \
Train Rotation Loss : [{mtrain_rot_loss:.5f}] Train Translation Loss : [{mtrain_trans_loss:.5f}] \
Val Loss : [{mval_loss:.5f}] Val Rotation Loss : [{mval_rot_loss:.5f}] Val Translation Loss : [{mval_trans_loss:.5f}]"
)
if scheduler is not None:
scheduler.step(mval_loss)
    # Keep the checkpoint from the epoch with the lowest validation loss
    if mval_loss < best_loss:
        best_loss = mval_loss
        best_model = model
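# A hedged follow-up sketch (an addition, not in the original notebook): once training
# finishes, persist the weights of the best checkpoint so they can be reloaded for
# inference later; the file name is an arbitrary choice made for this example.
if best_model is not None:
    torch.save(best_model.state_dict(), "best_image_matching_model.pth")
    print(f"Saved best model (validation loss {best_loss:.5f})")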
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/482/129482503.ipynb
| null | null |
[{"Id": 129482503, "ScriptId": 37430982, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3402809, "CreationDate": "05/14/2023 07:34:12", "VersionNumber": 12.0, "Title": "\ud83d\udd25Pytorch Image Matching \ud83d\udd25", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 290.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 284.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,197 | 0 | 3,197 | 3,197 |
||
129482192
|
<jupyter_start><jupyter_text>Chatbot Dataset Topical Chat
This is a Topical Chat dataset from Amazon! It consists of over 8000 conversations and over 184000 messages!
Within each message, there is: A conversation id, which is basically which conversation the message takes place in. Each message is either the start of a conversation or a reply from the previous message. There is also a sentiment, which represents the emotion that the person who sent the message is feeling. There are 8 sentiments: Angry, Curious to Dive Deeper, Disguised, Fearful, Happy, Sad, and Surprised.
This dataset can be used in machine learning to simulate a conversation or to make a chatbot. It can also be used for data visualization, for example you could visualize the word usage for the different emotions.
PS: If you cannot download the dataset, download it from here:
https://docs.google.com/spreadsheets/d/1dFdlvgmyXfN3SriVn5Byv_BNtyroICxdgrQKBzuMA1U/edit?usp=sharing
Original github dataset:
https://github.com/alexa/Topical-Chat
Kaggle dataset identifier: chatbot-dataset-topical-chat
<jupyter_script># #### Importing Libraries and Installing Dependencies
import torch
import pandas as pd
import numpy as np
import transformers
import random
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from torch.utils.data import Dataset, DataLoader
from tqdm.notebook import tqdm
# #### Downloading Topical Chat Data
# https://www.kaggle.com/datasets/arnavsharmaas/chatbot-dataset-topical-chat
# Next, let's load the data and preprocess it. In this example, we will only consider the text. We will also remove any comments that are too long or too short.
def load_data():
df = pd.read_csv("/kaggle/input/chatbot-dataset-topical-chat/topical_chat.csv")
comments = df["message"].tolist()
comments = [c.strip() for c in comments]
comments = [c for c in comments if len(c) > 10 and len(c) < 100]
return comments
comments = load_data()
comments[1:5]
# #### Preprocessing Data
# Now, let's tokenize the data using the GPT2 tokenizer.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
def tokenize_data(data):
tokenized_data = tokenizer.batch_encode_plus(
data, padding=True, return_tensors="pt"
)
return tokenized_data
tokenized_data = tokenize_data(comments)
# We will now define a PyTorch dataset and a dataloader to feed the tokenized data into the model during training.
class topicalDataset(Dataset):
def __init__(self, tokenized_data, block_size):
self.tokenized_data = tokenized_data
self.block_size = block_size
def __len__(self):
return len(self.tokenized_data["input_ids"])
def __getitem__(self, index):
return {
"input_ids": self.tokenized_data["input_ids"][index],
"attention_mask": self.tokenized_data["attention_mask"][index],
}
block_size = 128
dataset = topicalDataset(tokenized_data, block_size)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
# #### Training the LLM
model = GPT2LMHeadModel.from_pretrained("gpt2")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
epochs = 3
learning_rate = 5e-5
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=0, num_training_steps=len(dataloader) * epochs
)
def train():
model.train()
total_loss = 0
for batch in tqdm(dataloader):
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
optimizer.zero_grad()
outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)
loss = outputs[0]
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_loss = total_loss / len(dataloader)
print(f"Training loss: {avg_loss:.2f}")
# #### Generating Responses
def generate(prompt, max_length=50):
model.eval()
encoded_prompt = tokenizer.encode(
prompt,
add_special_tokens=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
truncation=True,
)
input_ids = encoded_prompt.to(device)
attention_mask = (input_ids != tokenizer.pad_token_id).to(device)
output_sequences = model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length + len(encoded_prompt[0]),
temperature=1.0,
top_k=0,
top_p=0.9,
repetition_penalty=1.0,
do_sample=True,
num_return_sequences=1,
)
generated_sequence = output_sequences[0]
generated_sequence = generated_sequence.tolist()
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
text = text[
len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :
]
return text
generate("Do you like dance?")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/482/129482192.ipynb
|
chatbot-dataset-topical-chat
|
arnavsharmaas
|
[{"Id": 129482192, "ScriptId": 38497778, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4672638, "CreationDate": "05/14/2023 07:31:05", "VersionNumber": 3.0, "Title": "LLM ChatBot", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 142.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 142.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185582222, "KernelVersionId": 129482192, "SourceDatasetVersionId": 1765643}]
|
[{"Id": 1765643, "DatasetId": 1049526, "DatasourceVersionId": 1802912, "CreatorUserId": 5939391, "LicenseName": "Unknown", "CreationDate": "12/20/2020 21:46:07", "VersionNumber": 1.0, "Title": "Chatbot Dataset Topical Chat", "Slug": "chatbot-dataset-topical-chat", "Subtitle": "Over 8000 conversations", "Description": "This is a Topical Chat dataset from Amazon! It consists of over 8000 conversations and over 184000 messages! \n\nWithin each message, there is: A conversation id, which is basically which conversation the message takes place in. Each message is either the start of a conversation or a reply from the previous message. There is also a sentiment, which represents the emotion that the person who sent the message is feeling. There are 8 sentiments: Angry, Curious to Dive Deeper, Disguised, Fearful, Happy, Sad, and Surprised.\n\nThis dataset can be used in machine learning to simulate a conversation or to make a chatbot. It can also be used for data visualization, for example you could visualize the word usage for the different emotions. \n\nPS: If you cannot download the dataset, download it from here:\nhttps://docs.google.com/spreadsheets/d/1dFdlvgmyXfN3SriVn5Byv_BNtyroICxdgrQKBzuMA1U/edit?usp=sharing\n\nOriginal github dataset:\nhttps://github.com/alexa/Topical-Chat", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1049526, "CreatorUserId": 5939391, "OwnerUserId": 5939391.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1765643.0, "CurrentDatasourceVersionId": 1802912.0, "ForumId": 1066530, "Type": 2, "CreationDate": "12/20/2020 21:46:07", "LastActivityDate": "12/20/2020", "TotalViews": 25728, "TotalDownloads": 2691, "TotalVotes": 36, "TotalKernels": 6}]
|
[{"Id": 5939391, "UserName": "arnavsharmaas", "DisplayName": "Arnav Sharma AS", "RegisterDate": "10/12/2020", "PerformanceTier": 0}]
|
| false | 1 | 1,138 | 0 | 1,424 | 1,138 |
||
129836364
|
# # Titanic Machine Learning Challenge
# Titanic Machine learning
# The aim of this challenge is to predict whether or not a passenger will survive the Titanic disaster.
# The Titanic was carrying approximately 2200 people, of which 1300 were passengers. The 20 lifeboats were only able to carry 1178 people. However, the lifeboats were not fully loaded with passengers, since the crew were afraid the davits would not support the weight of a fully loaded boat. This resulted in only 705 people being rescued in lifeboats. The Titanic disaster led to more than 1503 fatalities (815 passengers and 688 crew). The crew had 700 fatalities. Third-class passengers suffered the greatest loss of approximately 700 fatalities; only 174 third-class passengers survived. It is claimed that the steerage* passengers were prevented from boarding boats. However, this claim was largely dispelled, since the general alarm was sounded too late, so some third-class passengers did not realise the direness of the situation before it was too late. The large number of fatalities is also due to passengers finding it difficult to navigate the complex lower levels of the ship, which meant they reached the deck after the lifeboats had been launched. Many women also refused to leave their husbands and sons behind. 31.6% is the total percentage of passengers and crew who survived.
# The challenge is to accuratly predict if a person will survive.
# *The part of a ship providing the cheapest accommodation for passengers https://www.britannica.com/topic/Titanic/Discovery-and-legacy https://titanicfacts.net/ https://www.rmg.co.uk/stories/topics/rms-titanic-facts
# # Import the data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_selection import mutual_info_classif as MIC
from sklearn.feature_selection import mutual_info_regression as MIR
from sklearn import tree
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load training data
trainDF = pd.read_csv("/kaggle/input/titanic/train.csv")
print("Train data shape: ", trainDF.shape)
# View data headings
trainDF.head()
# Load test data
testDF = pd.read_csv("/kaggle/input/titanic/test.csv")
print("Test data shape: ", testDF.shape)
# View data headings
testDF.head()
# # Format and explore the data
# This is a binary classification (supervised learning) problem. Use logistic regression or a decision tree. Use one-hot encoding.
# First, check which data will be most useful with mutual information.
# The name of a passenger should not determine whether they survived or not.
# From a visual inspection, the ticket information seems to provide a ticket number and details of embarking or disembarking. This information will not add value.
# The cabin might matter if the ticket holder was in their cabin at the time.
# The ticket fare will relate more to the class, and therefore survival, than the actual value of the ticket.
# The point of embarkment will not affect whether they survive or not, since this will not determine their location on the boat at the time of sinking.
# It is suspected that the age, class, sex and whether they are travelling with a child will carry the most influence on whether the passenger survived or not.
# Drop the columns that will not add value from the initial suspicion
trainDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True)
# For NaN age replace with the average age of people on the ship
trainDF["Age"].fillna(trainDF["Age"].mean(), inplace=True)
# Make all float values (age) into integer
trainDF["Age"] = (trainDF["Age"] * 100).astype("int")
# The cabins were assigned as
# Deck T : Boat deck - 1 passenger
# First class
# Deck A : 42 passengers
# Deck B : 123 passengers
# Deck C : 310 passengers
# Deck D : 117 passengers
# Deck E : 97 passengers
# Second class
# Deck D : 118 passengers
# Deck E : 226 passengers
# Deck F : 218 passengers
# Deck G : 112 passengers
# Third class
# Deck D : 50 passengers
# Deck E : 260 passengers
# Deck F : 466 passengers
# Deck G : 250 passengers
# Ref
# #https://www.scribd.com/document/392911689/Cabin-Allocations#
# #https://titanic.fandom.com/wiki/Second_Class_Cabins#:~:text=D%2DDeck%3A%20At%20the%20stern,%2D43%20to%20E%2D107.
# Select cabin zone
trainDF["Deck"] = trainDF["Cabin"].astype(str).str[0]
trainDF.drop(columns="Cabin", inplace=True)
# For NaN Cabin replace with the most likely cabin location
trainDF["Deck"] = np.where(
(trainDF["Deck"] == "n") & (trainDF["Pclass"] == 1), "C", trainDF["Deck"]
)
trainDF["Deck"] = np.where(
(trainDF["Deck"] == "n") & (trainDF["Pclass"] == 2), "E", trainDF["Deck"]
)
trainDF["Deck"] = np.where(
(trainDF["Deck"] == "n") & (trainDF["Pclass"] == 3), "F", trainDF["Deck"]
)
# One hot encode the text data
print(trainDF.columns)
trainDF = pd.get_dummies(data=trainDF, columns=["Sex"], drop_first=True, dtype="int8")
trainDF = pd.get_dummies(data=trainDF, prefix="Deck", prefix_sep="", columns=["Deck"])
trainDF = pd.get_dummies(data=trainDF, prefix="SibSp", prefix_sep="", columns=["SibSp"])
trainDF = pd.get_dummies(data=trainDF, prefix="Parch", prefix_sep="", columns=["Parch"])
print(trainDF)
# Get an average mutual information score to highlight important features
featureDF = trainDF.drop(columns=["Survived"])  # score the features only, not the target itself
miScores = MIC(featureDF, trainDF["Survived"])  # , discrete_features=True)
miScoreSer = pd.Series(miScores, index=featureDF.columns)
print(miScoreSer)
# # Set train and test data
# Select a sample of 90% of the data frame to train with
xTrainDF = trainDF.sample(frac=0.9)
print(xTrainDF.shape)
# Use the remaining 10% of the data frame to check model with
xValDF = pd.concat([xTrainDF, trainDF]).drop_duplicates(keep=False)
print(xValDF.shape)
# Get training and testing y
yTrainDF = xTrainDF["Survived"]
xTrainDF.drop(columns=["Survived"], inplace=True)
yValDF = xValDF["Survived"]
xValDF.drop(columns=["Survived"], inplace=True)
# From the mi-score, the passenger sex, class and age are most likely to contribute to the survival rate. The cabin location and travel partner also have an influence.
# # Decision tree
# Fit the decision tree model
treeClassifier = tree.DecisionTreeClassifier()
treeClassifier = treeClassifier.fit(xTrainDF, yTrainDF)
# tree.plot_tree(treeClassifier)
# Predict the outcome
treePredict = treeClassifier.predict(xValDF)
# https://www.simplilearn.com/tutorials/scikit-learn-tutorial/sklearn-decision-trees
confusion_matrix = metrics.confusion_matrix(yValDF, treePredict)
labels = yValDF.unique()
matrix_df = pd.DataFrame(confusion_matrix)
sns.set(font_scale=1.3)
plt.figure(figsize=(10, 7))
ax = plt.axes()
sns.heatmap(matrix_df, annot=True, fmt="g", ax=ax, cmap="magma")
ax.set_title("Confusion Matrix - Decision Tree")
ax.set_xlabel("Predicted label", fontsize=15)
ax.set_xticklabels(list(labels))
ax.set_ylabel("True Label", fontsize=15)
ax.set_yticklabels(list(labels), rotation=0)
plt.show()
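# A short, hedged addition (not part of the original submission): summarise the same
# validation predictions numerically with overall accuracy and per-class precision/recall.
print("Validation accuracy:", metrics.accuracy_score(yValDF, treePredict))
print(metrics.classification_report(yValDF, treePredict))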
# # Format test data
# Remove unused parameters
testDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True)
# Format Age
testDF["Age"].fillna(testDF["Age"].mean(), inplace=True)
testDF["Age"] = (testDF["Age"] * 100).astype("int")
# Select cabin zone
testDF["Deck"] = testDF["Cabin"].astype(str).str[0]
testDF.drop(columns="Cabin", inplace=True)
# For NaN Cabin replace with the most likely cabin location
testDF["Deck"] = np.where(
(testDF["Deck"] == "n") & (testDF["Pclass"] == 1), "C", testDF["Deck"]
)
testDF["Deck"] = np.where(
(testDF["Deck"] == "n") & (testDF["Pclass"] == 2), "E", testDF["Deck"]
)
testDF["Deck"] = np.where(
(testDF["Deck"] == "n") & (testDF["Pclass"] == 3), "F", testDF["Deck"]
)
# One hot encode the text data
print(testDF.columns)
testDF = pd.get_dummies(data=testDF, columns=["Sex"], drop_first=True, dtype="int8")
testDF = pd.get_dummies(data=testDF, prefix="Deck", prefix_sep="", columns=["Deck"])
testDF = pd.get_dummies(data=testDF, prefix="SibSp", prefix_sep="", columns=["SibSp"])
testDF = pd.get_dummies(data=testDF, prefix="Parch", prefix_sep="", columns=["Parch"])
print(testDF)
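# Hedged note: get_dummies on the test set can produce a different set of dummy columns than
# on the training set (a SibSp or Parch value present in only one of them). The sketch below
# (an addition, not part of the original submission) aligns the test features to the training
# columns, filling missing dummies with 0; it could be passed to predict() in place of testDF.
alignedTestDF = testDF.reindex(columns=xTrainDF.columns, fill_value=0)
print(alignedTestDF.shape)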
treePredict = treeClassifier.predict(testDF)
# Generate results file
output = pd.DataFrame({"PassengerId": testDF.PassengerId, "Survived": treePredict})
output.to_csv("ClassificationTry2.csv", index=False)
print("Your file was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/836/129836364.ipynb
| null | null |
[{"Id": 129836364, "ScriptId": 38613574, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11417667, "CreationDate": "05/16/2023 20:50:06", "VersionNumber": 1.0, "Title": "TitanicML_try2_2023-05-16", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 230.0, "LinesInsertedFromPrevious": 230.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,787 | 0 | 2,787 | 2,787 |
||
129841155
|
<jupyter_start><jupyter_text>Auto-mpg dataset
### Context
The data is technical spec of cars. The dataset is downloaded from UCI Machine Learning Repository
### Content
1. Title: Auto-Mpg Data
2. Sources:
(a) Origin: This dataset was taken from the StatLib library which is
maintained at Carnegie Mellon University. The dataset was
used in the 1983 American Statistical Association Exposition.
(c) Date: July 7, 1993
3. Past Usage:
- See 2b (above)
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning.
In Proceedings on the Tenth International Conference of Machine
Learning, 236-243, University of Massachusetts, Amherst. Morgan
Kaufmann.
4. Relevant Information:
This dataset is a slightly modified version of the dataset provided in
the StatLib library. In line with the use by Ross Quinlan (1993) in
predicting the attribute "mpg", 8 of the original instances were removed
because they had unknown values for the "mpg" attribute. The original
dataset is available in the file "auto-mpg.data-original".
"The data concerns city-cycle fuel consumption in miles per gallon,
to be predicted in terms of 3 multivalued discrete and 5 continuous
attributes." (Quinlan, 1993)
5. Number of Instances: 398
6. Number of Attributes: 9 including the class attribute
7. Attribute Information:
1. mpg: continuous
2. cylinders: multi-valued discrete
3. displacement: continuous
4. horsepower: continuous
5. weight: continuous
6. acceleration: continuous
7. model year: multi-valued discrete
8. origin: multi-valued discrete
9. car name: string (unique for each instance)
8. Missing Attribute Values: horsepower has 6 missing values
Kaggle dataset identifier: autompg-dataset
<jupyter_script>import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# warning
import warnings
warnings.filterwarnings("ignore")
# mpg: The "mpg" column represents the miles per gallon, indicating how many miles a vehicle can travel on one gallon of fuel. It is a continuous value.
# cylinders: The "cylinders" column denotes the number of cylinders in the engine of the vehicle. It is a multi-valued discrete feature, indicating the count of cylinders, such as 3, 4, 5, 6, 8.
# displacement: The "displacement" column represents the total volume swept by all the cylinders in the engine. It is a continuous value; in this dataset it is recorded in cubic inches.
# horsepower: The "horsepower" column indicates the power output of the vehicle's engine. It is a continuous value, representing the strength of the engine.
# weight: The "weight" column represents the weight of the vehicle. It is a continuous value, often measured in pounds or kilograms.
# acceleration: The "acceleration" column denotes the time it takes for the vehicle to reach a certain speed. It is a continuous value, indicating the rate of change of velocity.
# model year: The "model year" column represents the year in which the vehicle was manufactured. It is a multi-valued discrete feature, indicating different years of production.
# origin: The "origin" column indicates the geographic origin or manufacturing region of the vehicle. It is a multi-valued discrete feature, representing different countries or regions.
# car name: The "car name" column specifies the unique name of each vehicle instance. It is a string feature, providing a distinct identifier for each car
df = pd.read_csv("/kaggle/input/autompg-dataset/auto-mpg.csv")
df.head()
df.cylinders.value_counts()
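# A hedged next step (not in the original cell), assuming the column is named "horsepower"
# as in the dataset description: the description notes six missing horsepower values, and in
# this CSV they appear as the string "?", which keeps the column as text; coercing to numeric
# exposes them as NaN for later handling.
df["horsepower"] = pd.to_numeric(df["horsepower"], errors="coerce")
print("Missing horsepower values:", df["horsepower"].isna().sum())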
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/841/129841155.ipynb
|
autompg-dataset
| null |
[{"Id": 129841155, "ScriptId": 38616820, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12449603, "CreationDate": "05/16/2023 22:01:24", "VersionNumber": 1.0, "Title": "Vehicle Fuel Consumption (EDA-ML)", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 39.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186225522, "KernelVersionId": 129841155, "SourceDatasetVersionId": 2704}]
|
[{"Id": 2704, "DatasetId": 1489, "DatasourceVersionId": 2704, "CreatorUserId": 693660, "LicenseName": "CC0: Public Domain", "CreationDate": "07/02/2017 05:25:54", "VersionNumber": 3.0, "Title": "Auto-mpg dataset", "Slug": "autompg-dataset", "Subtitle": "Mileage per gallon performances of various cars", "Description": "### Context\n\nThe data is technical spec of cars. The dataset is downloaded from UCI Machine Learning Repository\n\n\n### Content\n\n1. Title: Auto-Mpg Data\n\n2. Sources:\n (a) Origin: This dataset was taken from the StatLib library which is\n maintained at Carnegie Mellon University. The dataset was \n used in the 1983 American Statistical Association Exposition.\n (c) Date: July 7, 1993\n\n3. Past Usage:\n - See 2b (above)\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning.\n In Proceedings on the Tenth International Conference of Machine \n Learning, 236-243, University of Massachusetts, Amherst. Morgan\n Kaufmann.\n\n4. Relevant Information:\n\n This dataset is a slightly modified version of the dataset provided in\n the StatLib library. In line with the use by Ross Quinlan (1993) in\n predicting the attribute \"mpg\", 8 of the original instances were removed \n because they had unknown values for the \"mpg\" attribute. The original \n dataset is available in the file \"auto-mpg.data-original\".\n\n \"The data concerns city-cycle fuel consumption in miles per gallon,\n to be predicted in terms of 3 multivalued discrete and 5 continuous\n attributes.\" (Quinlan, 1993)\n\n5. Number of Instances: 398\n\n6. Number of Attributes: 9 including the class attribute\n\n7. Attribute Information:\n\n 1. mpg: continuous\n 2. cylinders: multi-valued discrete\n 3. displacement: continuous\n 4. horsepower: continuous\n 5. weight: continuous\n 6. acceleration: continuous\n 7. model year: multi-valued discrete\n 8. origin: multi-valued discrete\n 9. car name: string (unique for each instance)\n\n8. Missing Attribute Values: horsepower has 6 missing values\n\n### Acknowledgements\n\nDataset: UCI Machine Learning Repository \nData link : https://archive.ics.uci.edu/ml/datasets/auto+mpg\n\n\n### Inspiration\n\nI have used this dataset for practicing my exploratory analysis skills.", "VersionNotes": "Auto-mpg.csv with header", "TotalCompressedBytes": 18131.0, "TotalUncompressedBytes": 18131.0}]
|
[{"Id": 1489, "CreatorUserId": 693660, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 2704.0, "CurrentDatasourceVersionId": 2704.0, "ForumId": 4406, "Type": 2, "CreationDate": "06/28/2017 10:09:21", "LastActivityDate": "02/05/2018", "TotalViews": 274788, "TotalDownloads": 40258, "TotalVotes": 275, "TotalKernels": 260}]
| null |
| false | 0 | 485 | 0 | 980 | 485 |
||
129841525
|
<jupyter_start><jupyter_text>College Basketball Dataset
### Content
Data from the 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, and 2021 Division I college basketball seasons.
cbb.csv has seasons 2013-2019 and seasons 2021-2023 combined
The 2020 season's data set is kept separate from the other seasons, because there was no postseason due to the Coronavirus.
###Variables
RK (Only in cbb20): The ranking of the team at the end of the regular season according to barttorvik
TEAM: The Division I college basketball school
CONF: The Athletic Conference in which the school participates in (A10 = Atlantic 10, ACC = Atlantic Coast Conference, AE = America East, Amer = American, ASun = ASUN, B10 = Big Ten, B12 = Big 12, BE = Big East, BSky = Big Sky, BSth = Big South, BW = Big West, CAA = Colonial Athletic Association, CUSA = Conference USA, Horz = Horizon League, Ivy = Ivy League, MAAC = Metro Atlantic Athletic Conference, MAC = Mid-American Conference, MEAC = Mid-Eastern Athletic Conference, MVC = Missouri Valley Conference, MWC = Mountain West, NEC = Northeast Conference, OVC = Ohio Valley Conference, P12 = Pac-12, Pat = Patriot League, SB = Sun Belt, SC = Southern Conference, SEC = South Eastern Conference, Slnd = Southland Conference, Sum = Summit League, SWAC = Southwestern Athletic Conference, WAC = Western Athletic Conference, WCC = West Coast Conference)
G: Number of games played
W: Number of games won
ADJOE: Adjusted Offensive Efficiency (An estimate of the offensive efficiency (points scored per 100 possessions) a team would have against the average Division I defense)
ADJDE: Adjusted Defensive Efficiency (An estimate of the defensive efficiency (points allowed per 100 possessions) a team would have against the average Division I offense)
BARTHAG: Power Rating (Chance of beating an average Division I team)
EFG_O: Effective Field Goal Percentage Shot
EFG_D: Effective Field Goal Percentage Allowed
TOR: Turnover Percentage Allowed (Turnover Rate)
TORD: Turnover Percentage Committed (Steal Rate)
ORB: Offensive Rebound Rate
DRB: Offensive Rebound Rate Allowed
FTR : Free Throw Rate (How often the given team shoots Free Throws)
FTRD: Free Throw Rate Allowed
2P_O: Two-Point Shooting Percentage
2P_D: Two-Point Shooting Percentage Allowed
3P_O: Three-Point Shooting Percentage
3P_D: Three-Point Shooting Percentage Allowed
ADJ_T: Adjusted Tempo (An estimate of the tempo (possessions per 40 minutes) a team would have against the team that wants to play at an average Division I tempo)
WAB: Wins Above Bubble (The bubble refers to the cut off between making the NCAA March Madness Tournament and not making it)
POSTSEASON: Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year)
SEED: Seed in the NCAA March Madness Tournament
YEAR: Season
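A minimal usage sketch for these columns (assuming the `cbb.csv` path used in the notebook below): pull one conference's seasons and rank them by Wins Above Bubble.

```python
import pandas as pd

cbb = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv")
# All ACC seasons, strongest resumes first
acc = cbb[cbb["CONF"] == "ACC"].sort_values("WAB", ascending=False)
print(acc[["TEAM", "YEAR", "W", "WAB", "POSTSEASON"]].head())
```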
Kaggle dataset identifier: college-basketball-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
print("Setup complete.")
df = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv")
df = df.sort_values(by=["TEAM", "YEAR"])
duke_years = df[df["TEAM"] == "Duke"]
columns_list = df.columns.tolist()
print(columns_list)
postseason_mapping = {
"Champions": 1,
"2ND": 2,
"F4": 4,
"E8": 8,
"S16": 16,
"R32": 32,
"R64": 64,
"R68": 68,
}
df["POSTSEASON"] = df["POSTSEASON"].replace(postseason_mapping)
# Analyzing three point offense and defense on team success...
selected_columns = ["TEAM", "W", "POSTSEASON", "3P_O", "3P_D"]
data_filtered = df[selected_columns].copy()
data_filtered = (
data_filtered.dropna()
) # Includes only teams that made it to the postseason
data_filtered.sort_values(by="POSTSEASON", key=lambda x: x.astype(int))
# Create correlation matrix...
correlation_matrix = data_filtered[["3P_O", "3P_D", "W", "POSTSEASON"]].corr()
print(correlation_matrix)
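# (Sketch) A heatmap can make the matrix above easier to scan than the raw printout;
# seaborn is assumed to be available in the Kaggle image.
import seaborn as sns
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm", vmin=-1, vmax=1)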
# Clearly, the strongest correlation lies between wins and postseason success. I am surprised to find that 3-point efficiency and 3-point defense have relatively low correlations to postseason success. It's important to note, however, that 3-point defense has a higher correlation to wins and postseason success than 3-point efficiency.
# It's a common phrase that "defense wins championships." Let's look at whether defensive rating has a higher correlation to wins and postseason success than offensive rating.
adje_columns = ["TEAM", "YEAR", "W", "POSTSEASON", "ADJOE", "ADJDE"]
adje_df = df[adje_columns].copy()
adje_df = adje_df.dropna()
adje_df.sort_values(by="ADJOE", ascending=False)
adje_correlation_matrix = adje_df[["W", "POSTSEASON", "ADJOE", "ADJDE"]].corr()
print(adje_correlation_matrix)
# Interesting. The correlations between wins and ADJOE/ADJDE are fairly similar, which is unsurprising, since those metrics are "adjusted" to estimate points scored against, and points allowed to, the average D-1 basketball team. However, I was surprised to learn that ADJOE has a higher correlation to postseason success than ADJDE.
effectiveness = adje_df.copy()
effectiveness["EFF"] = effectiveness["ADJOE"] - effectiveness["ADJDE"]
effectiveness_correlation_matrix = effectiveness[["W", "POSTSEASON", "EFF"]].corr()
print(effectiveness_correlation_matrix)
# This is a much better metric. But I also wonder whether defense plays more of a role in winning in certain years than in others...
good_postseason_teams = adje_df[adje_df["POSTSEASON"] <= 4]
good_postseason_teams.reset_index()
avg_adjde_by_year = good_postseason_teams.groupby("YEAR")["ADJDE"].mean()
avg_adjoe_by_year = good_postseason_teams.groupby("YEAR")["ADJOE"].mean()
import matplotlib.pyplot as plt
plt.plot(avg_adjde_by_year.index, avg_adjde_by_year.values)
plt.xlabel("Year")
plt.ylabel("Average ADJDE")
plt.title("Average ADJDE of Teams with Postseason Success")
plt.show()
plt.plot(avg_adjoe_by_year.index, avg_adjoe_by_year.values)
plt.xlabel("Year")
plt.ylabel("Average ADJOE")
plt.title("Average ADJOE of Teams with Postseason Success")
plt.show()
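# (Optional sketch) Overlaying both averages on one chart makes the offense/defense
# trend easier to compare year over year.
plt.plot(avg_adjoe_by_year.index, avg_adjoe_by_year.values, label="ADJOE")
plt.plot(avg_adjde_by_year.index, avg_adjde_by_year.values, label="ADJDE")
plt.xlabel("Year")
plt.ylabel("Average adjusted efficiency")
plt.title("Teams with Postseason Success: ADJOE vs ADJDE")
plt.legend()
plt.show()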
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/841/129841525.ipynb
|
college-basketball-dataset
|
andrewsundberg
|
[{"Id": 129841525, "ScriptId": 38556575, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10847921, "CreationDate": "05/16/2023 22:06:28", "VersionNumber": 2.0, "Title": "College Basketball Correlations", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186226085, "KernelVersionId": 129841525, "SourceDatasetVersionId": 2027426}]
|
[{"Id": 2027426, "DatasetId": 418778, "DatasourceVersionId": 2067134, "CreatorUserId": 2192630, "LicenseName": "CC0: Public Domain", "CreationDate": "03/16/2021 00:56:42", "VersionNumber": 4.0, "Title": "College Basketball Dataset", "Slug": "college-basketball-dataset", "Subtitle": "Datasets for the 2013 through 2021 seasons", "Description": "### Content\nData from the 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, and 2021 Division I college basketball seasons.\n\ncbb.csv has seasons 2013-2019 and seasons 2021-2023 combined\n\nThe 2020 season's data set is kept separate from the other seasons, because there was no postseason due to the Coronavirus.\n\n###Variables\n\nRK (Only in cbb20): The ranking of the team at the end of the regular season according to barttorvik\n\nTEAM: The Division I college basketball school\n\nCONF: The Athletic Conference in which the school participates in (A10 = Atlantic 10, ACC = Atlantic Coast Conference, AE = America East, Amer = American, ASun = ASUN, B10 = Big Ten, B12 = Big 12, BE = Big East, BSky = Big Sky, BSth = Big South, BW = Big West, CAA = Colonial Athletic Association, CUSA = Conference USA, Horz = Horizon League, Ivy = Ivy League, MAAC = Metro Atlantic Athletic Conference, MAC = Mid-American Conference, MEAC = Mid-Eastern Athletic Conference, MVC = Missouri Valley Conference, MWC = Mountain West, NEC = Northeast Conference, OVC = Ohio Valley Conference, P12 = Pac-12, Pat = Patriot League, SB = Sun Belt, SC = Southern Conference, SEC = South Eastern Conference, Slnd = Southland Conference, Sum = Summit League, SWAC = Southwestern Athletic Conference, WAC = Western Athletic Conference, WCC = West Coast Conference)\n\nG: Number of games played\n\nW: Number of games won\n\nADJOE: Adjusted Offensive Efficiency (An estimate of the offensive efficiency (points scored per 100 possessions) a team would have against the average Division I defense)\n\nADJDE: Adjusted Defensive Efficiency (An estimate of the defensive efficiency (points allowed per 100 possessions) a team would have against the average Division I offense)\n\nBARTHAG: Power Rating (Chance of beating an average Division I team)\n\nEFG_O: Effective Field Goal Percentage Shot\n\nEFG_D: Effective Field Goal Percentage Allowed\n\nTOR: Turnover Percentage Allowed (Turnover Rate)\n\nTORD: Turnover Percentage Committed (Steal Rate)\n\nORB: Offensive Rebound Rate\n\nDRB: Offensive Rebound Rate Allowed\n\nFTR\t: Free Throw Rate (How often the given team shoots Free Throws)\n\nFTRD: Free Throw Rate Allowed\n\n2P_O: Two-Point Shooting Percentage\n\n2P_D: Two-Point Shooting Percentage Allowed\n\n3P_O: Three-Point Shooting Percentage\n\n3P_D: Three-Point Shooting Percentage Allowed\n\nADJ_T: Adjusted Tempo (An estimate of the tempo (possessions per 40 minutes) a team would have against the team that wants to play at an average Division I tempo)\n\nWAB: Wins Above Bubble (The bubble refers to the cut off between making the NCAA March Madness Tournament and not making it)\t\n\nPOSTSEASON: Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year)\n\nSEED: Seed in the NCAA March Madness Tournament\n\nYEAR: Season\n\n\n### Acknowledgements\n\nThis data was scraped from from http://barttorvik.com/trank.php#. 
I cleaned the data set and added the POSTSEASON, SEED, and YEAR columns", "VersionNotes": "Added 2013 and 2014 as well as 2021 data from the start of the tournament", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 418778, "CreatorUserId": 2192630, "OwnerUserId": 2192630.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6097216.0, "CurrentDatasourceVersionId": 6175761.0, "ForumId": 431212, "Type": 2, "CreationDate": "11/18/2019 02:54:22", "LastActivityDate": "11/18/2019", "TotalViews": 139857, "TotalDownloads": 18994, "TotalVotes": 285, "TotalKernels": 33}]
|
[{"Id": 2192630, "UserName": "andrewsundberg", "DisplayName": "Andrew Sundberg", "RegisterDate": "08/29/2018", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
print("Setup complete.")
df = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv")
df = df.sort_values(by=["TEAM", "YEAR"])
duke_years = df[df["TEAM"] == "Duke"]
columns_list = df.columns.tolist()
print(columns_list)
postseason_mapping = {
"Champions": 1,
"2ND": 2,
"F4": 4,
"E8": 8,
"S16": 16,
"R32": 32,
"R64": 64,
"R68": 68,
}
df["POSTSEASON"] = df["POSTSEASON"].replace(postseason_mapping)
# Analyzing three point offense and defense on team success...
selected_columns = ["TEAM", "W", "POSTSEASON", "3P_O", "3P_D"]
data_filtered = df[selected_columns].copy()
data_filtered = (
data_filtered.dropna()
) # Includes only teams that made it to the postseason
data_filtered.sort_values(by="POSTSEASON", key=lambda x: x.astype(int))
# Create correlation matrix...
correlation_matrix = data_filtered[["3P_O", "3P_D", "W", "POSTSEASON"]].corr()
print(correlation_matrix)
# Clearly, the strongest correlation lies between wins and postseason success. I am surprised to find that 3-point efficiency and 3-point defense have relatively low correlations to postseason success. It's important to note, however, that 3-point defense has a higher correlation to wins and postseason success than 3-point efficiency.
# It's a common phrase that "defense wins championships." Let's look at whether defensive rating has a higher correlation to wins and postseason success than offensive rating.
adje_columns = ["TEAM", "YEAR", "W", "POSTSEASON", "ADJOE", "ADJDE"]
adje_df = df[adje_columns].copy()
adje_df = adje_df.dropna()
adje_df.sort_values(by="ADJOE", ascending=False)
adje_correlation_matrix = adje_df[["W", "POSTSEASON", "ADJOE", "ADJDE"]].corr()
print(adje_correlation_matrix)
# Interesting. The correlations between wins and ADJOE/ADJDE are fairly similar, which is unsurprising, since those metrics are "adjusted" to estimate points scored against, and points allowed to, the average D-1 basketball team. However, I was surprised to learn that ADJOE has a higher correlation to postseason success than ADJDE.
effectiveness = adje_df.copy()
effectiveness["EFF"] = effectiveness["ADJOE"] - effectiveness["ADJDE"]
effectiveness_correlation_matrix = effectiveness[["W", "POSTSEASON", "EFF"]].corr()
print(effectiveness_correlation_matrix)
# This is a much better metric. But I also wonder whether defense plays more of a role in winning in certain years than in others...
good_postseason_teams = adje_df[adje_df["POSTSEASON"] <= 4]
good_postseason_teams.reset_index()
avg_adjde_by_year = good_postseason_teams.groupby("YEAR")["ADJDE"].mean()
avg_adjoe_by_year = good_postseason_teams.groupby("YEAR")["ADJOE"].mean()
import matplotlib.pyplot as plt
plt.plot(avg_adjde_by_year.index, avg_adjde_by_year.values)
plt.xlabel("Year")
plt.ylabel("Average ADJDE")
plt.title("Average ADJDE of Teams with Postseason Success")
plt.show()
plt.plot(avg_adjoe_by_year.index, avg_adjoe_by_year.values)
plt.xlabel("Year")
plt.ylabel("Average ADJOE")
plt.title("Average ADJOE of Teams with Postseason Success")
plt.show()
| false | 1 | 1,168 | 0 | 2,163 | 1,168 |
||
129151809
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from nltk.tokenize import RegexpTokenizer
def tokenizer(txt):
token = RegexpTokenizer(r"\w+")
return " ".join(list(token.tokenize(txt)))
# stopwords
from nltk.corpus import stopwords
st_words = stopwords.words("english")
def remove_stopwords(lst):
res = []
lst = lst.split(" ")
for word in lst:
if word.lower() not in st_words:
res.append(word.lower())
return " ".join(res)
# Stemming
from nltk.stem import WordNetLemmatizer
Lemmatizer = WordNetLemmatizer()
def lemmatize_words(lst):
res = []
for word in lst.split(" "):
res.append(Lemmatizer.lemmatize(word))
return " ".join(res)
# list to str
def convert_tostr(lst):
return " ".join(lst)
def convert_tolst(s):
return [s]
import pandas as pd
import numpy as np
b = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_7.json")
b["_data"] = b["Text"].apply(tokenizer)
b["_data"] = b["_data"].apply(remove_stopwords)
b["_data"] = b["_data"].apply(lemmatize_words)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
vectorizer = TfidfVectorizer()
articles = list(b["_data"])
tfidf_matrix = vectorizer.fit_transform(articles)
for i in tfidf_matrix:
print(i)
cosine_sim = cosine_similarity(tfidf_matrix)
cosine_sim
cluster = AgglomerativeClustering(
n_clusters=None, distance_threshold=0.5, linkage="single"
)
cluster_labels = cluster.fit_predict(cosine_sim)
b["cluster_label_tf_af"] = cluster_labels
n = cluster_labels.max()
for i in range(0, n + 1):
dt = b.query("cluster_label_tf_af==" + str(i))
if dt.shape[0] == 1:
b["cluster_label_tf_af"] = b["cluster_label_tf_af"].replace(
[dt["cluster_label_tf_af"].values[0]], -1
)
dic = {}
for i in b["cluster_label_tf_af"]:
    if dic.get(i) is not None:
dic[i] = dic[i] + 1
else:
dic[i] = 1
print("total_clusters: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
b.to_csv("May_7.csv")
dt = b.query(
"cluster_label_tf_af==0"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("** Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["_source"]["source"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/151/129151809.ipynb
| null | null |
[{"Id": 129151809, "ScriptId": 38390627, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14218315, "CreationDate": "05/11/2023 11:29:38", "VersionNumber": 1.0, "Title": "AgglomerativeClustering", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 129.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from nltk.tokenize import RegexpTokenizer
def tokenizer(txt):
token = RegexpTokenizer(r"\w+")
return " ".join(list(token.tokenize(txt)))
# stopwords
from nltk.corpus import stopwords
st_words = stopwords.words("english")
def remove_stopwords(lst):
res = []
lst = lst.split(" ")
for word in lst:
if word.lower() not in st_words:
res.append(word.lower())
return " ".join(res)
# Stemming
from nltk.stem import WordNetLemmatizer
Lemmatizer = WordNetLemmatizer()
def lemmatize_words(lst):
res = []
for word in lst.split(" "):
res.append(Lemmatizer.lemmatize(word))
return " ".join(res)
# list to str
def convert_tostr(lst):
return " ".join(lst)
def convert_tolst(s):
return [s]
import pandas as pd
import numpy as np
b = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_7.json")
b["_data"] = b["Text"].apply(tokenizer)
b["_data"] = b["_data"].apply(remove_stopwords)
b["_data"] = b["_data"].apply(lemmatize_words)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
vectorizer = TfidfVectorizer()
articles = list(b["_data"])
tfidf_matrix = vectorizer.fit_transform(articles)
for i in tfidf_matrix:
print(i)
cosine_sim = cosine_similarity(tfidf_matrix)
cosine_sim
cluster = AgglomerativeClustering(
n_clusters=None, distance_threshold=0.5, linkage="single"
)
cluster_labels = cluster.fit_predict(cosine_sim)
b["cluster_label_tf_af"] = cluster_labels
n = cluster_labels.max()
for i in range(0, n + 1):
dt = b.query("cluster_label_tf_af==" + str(i))
if dt.shape[0] == 1:
b["cluster_label_tf_af"] = b["cluster_label_tf_af"].replace(
[dt["cluster_label_tf_af"].values[0]], -1
)
dic = {}
for i in b["cluster_label_tf_af"]:
    if dic.get(i) is not None:
dic[i] = dic[i] + 1
else:
dic[i] = 1
print("total_clusters: ", max(dic.keys()))
for i, j in dic.items():
print(f"cluster :{i} , No of Articles :{j}")
b.to_csv("May_7.csv")
dt = b.query(
"cluster_label_tf_af==0"
) # defining cluster id and id must be >=40 and <=2480
for i, r in dt.iterrows():
print("** Article id---", r["_id"])
print("\n")
# print('hashlink---',r['hashlink'])
# print('\n')
print("source---", r["_source"]["source"])
print("\n")
print("Content -----", r["Text"])
print(
"----------------------------------------------------------------------------------"
)
print("\n")
| false | 0 | 1,039 | 0 | 1,039 | 1,039 |
||
129126559
|
<jupyter_start><jupyter_text>Marijuana Arrests in Toronto: Racial Disparities
```
Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.
```
| Column | Description |
| --- | --- |
| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes. |
| colour | The arrestee's race; a factor with levels: Black; White. |
| year | 1997 through 2002; a numeric vector. |
| age | in years; a numeric vector. |
| sex | a factor with levels: Female; Male. |
| employed | a factor with levels: No; Yes. |
| citizen | a factor with levels: No; Yes. |
| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. – 6 in all) on which the arrestee's name appeared; a numeric vector |
# Source
Personal communication from Michael Friendly, York University.
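As a quick orientation (a sketch only, using the Kaggle input path from the notebook below), the headline comparison in this data is the release rate by the `colour` factor:

```python
import pandas as pd

arrests = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
# Share of arrestees released with a summons, split by race
print(arrests.groupby("colour")["released"].value_counts(normalize=True))
```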
Kaggle dataset identifier: arrests-for-marijuana-possession
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import Libraries and Load Dataset
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelBinarizer
# load dataset
data = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
data
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
corr_matrix = data.corr()
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax)
plt.show()
data.describe()
# # Looking for Null values
num_nan = data.isna().sum()
print(num_nan)
data.columns
data = data.drop("Unnamed: 0", axis=1)
data
# # Creating Dummy Variables and Applying Binary Labeling
lb = LabelBinarizer()
data["released_binary"] = lb.fit_transform(data["released"])
data["colour_binary"] = lb.fit_transform(data["colour"])
data["sex_binary"] = lb.fit_transform(data["sex"])
data["employed_binary"] = lb.fit_transform(data["employed"])
data["citizen_binary"] = lb.fit_transform(data["citizen"])
data
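# Side note (sketch only, not used below): pandas can binarize all five Yes/No-style
# columns in one call; LabelBinarizer is kept above because it was already imported.
dummy_check = pd.get_dummies(
    data[["released", "colour", "sex", "employed", "citizen"]], drop_first=True
)
print(dummy_check.head())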
# # Getting rid of Categorical Features
data = data.drop(["released", "colour", "sex", "employed", "citizen"], axis=1)
data
corr_matrix = data.corr()
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax)
plt.show()
# # Applying Standard Scaling
scaler = StandardScaler()
scaler.fit(data[["age", "year", "checks"]])
data[["age", "year", "checks"]] = scaler.transform(data[["age", "year", "checks"]])
data
# # Splitting the dataset into Training and Test sets
X_train, X_test, y_train, y_test = train_test_split(
data.drop("released_binary", axis=1),
data["released_binary"],
test_size=0.2,
random_state=42,
)
X_train
y_train
X_test
y_test
# # Training the dataset using Logistic Regression Classifier
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# # Testing on dataset using Logistic Regression Classifier
y_pred = logreg.predict(X_test)
y_pred
accuracy = logreg.score(X_test, y_test)
print(accuracy)
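# (Sketch) The released/not-released classes are imbalanced, so accuracy alone can be
# misleading; a confusion matrix is a useful companion check for the logistic model.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))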
# # Importing Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint
# # Creating a Grid Search to find best hyperparameters for the RF classifier
# Define the range of hyperparameters to tune
param_grid = {
"n_estimators": [50, 100, 150, 200],
"max_depth": [2, 5, 10, 20],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
}
rf = RandomForestClassifier(random_state=42)
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5)
grid_search.fit(X_train, y_train)
print("Best hyperparameters: ", grid_search.best_params_)
# # Training the dataset using RF Classifier and best hyperparameters
rf = RandomForestClassifier(
random_state=42, max_depth=5, min_samples_leaf=1, n_estimators=200
)
rf.fit(X_train, y_train)
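# Note (sketch): grid_search.best_estimator_ could be used here instead of re-typing
# the parameters, which guarantees every tuned value (including min_samples_split)
# is carried over from the search above.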
# # Predicting results using RF Classifier
y_pred = rf.predict(X_test)
y_pred
from sklearn.metrics import classification_report  # import was missing in the original cell
print(classification_report(y_test, y_pred))
# # Importing Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
# # Creating a Grid Search to find best hyperparameters for the Decision Tree classifier
param_grid = {
"max_depth": [2, 4, 6, 8, 10],
"min_samples_split": [2, 4, 6, 8, 10],
"min_samples_leaf": [1, 2, 3, 4, 5],
"max_features": ["sqrt", "log2", None],
"criterion": ["gini", "entropy"],
}
clf = DecisionTreeClassifier()
grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train, y_train)
# # Finding best hyperparameters for Decision Tree
print("Best Hyperparameters:", grid_search.best_params_)
# # Training the Decision Tree Model using best hyperparameters
best_clf = grid_search.best_estimator_
y_pred = best_clf.predict(X_test)
# # Predicting results using Decision Tree Classifier
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing Gradient Boosting Classifier
from sklearn.ensemble import GradientBoostingClassifier
# # Creating a Grid Search to find best hyperparameters for the GB classifier
param_grid = {
"n_estimators": [50, 100, 150],
"max_depth": [3, 5, 7],
"learning_rate": [0.05, 0.1, 0.2],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
"max_features": [None, "sqrt", "log2"],
}
clf = GradientBoostingClassifier(random_state=42)
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5)
grid_search.fit(X_train, y_train)
# # Finding best hyperparameters for GB Classifier
print("Best Hyperparameters: ", grid_search.best_params_)
print("Best Accuracy Score: {:.2f}%".format(grid_search.best_score_ * 100))
# # Training the GB Classifier using best hyperparameters
best_clf = grid_search.best_estimator_
y_pred = best_clf.predict(X_test)
# # Predicting results using GB Classifier
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing SVM and Training on dataset
from sklearn import svm
clf = svm.SVC(kernel="poly", C=100)
clf.fit(X_train, y_train)
# # Predicting results using SVM
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing Naive Bayes and Training on the Dataset
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train, y_train)
# # Predicting results using Naive Bayes
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing KNN Classifier
from sklearn.neighbors import KNeighborsClassifier
# # Finding the best value for Number of Neighbours for KNN
n_neighbors_list = range(1, 21)
accuracy_list = []
for n in n_neighbors_list:
clf = KNeighborsClassifier(n_neighbors=n)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy_list.append(accuracy)
# Plot the results of the elbow method
plt.plot(n_neighbors_list, accuracy_list)
plt.title("Elbow Curve for KNN")
plt.xlabel("Number of Neighbors")
plt.ylabel("Accuracy")
plt.show()
# # Training the Dataset with best Hyperparameters of KNN
clf = KNeighborsClassifier(n_neighbors=20)
clf.fit(X_train, y_train)
# # Predicting the results using KNN
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # And, Finally importing ANN
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# # Model Compilation
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2)
# # ANN Model Prediction
loss, accuracy = model.evaluate(X_test, y_test)
print("Accuracy: {:.2f}%".format(accuracy * 100))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/126/129126559.ipynb
|
arrests-for-marijuana-possession
|
utkarshx27
|
[{"Id": 129126559, "ScriptId": 38320485, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7719373, "CreationDate": "05/11/2023 07:44:01", "VersionNumber": 2.0, "Title": "Classification of Criminals Rereleased or Not", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 294.0, "LinesInsertedFromPrevious": 96.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 198.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184903786, "KernelVersionId": 129126559, "SourceDatasetVersionId": 5631796}]
|
[{"Id": 5631796, "DatasetId": 3238325, "DatasourceVersionId": 5707058, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 10:17:21", "VersionNumber": 1.0, "Title": "Marijuana Arrests in Toronto: Racial Disparities", "Slug": "arrests-for-marijuana-possession", "Subtitle": "Marijuana Arrests in Toronto: Race, Release, and Policing (1997-2002)", "Description": "``` \nData on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.\n```\n| Column | Description |\n| --- | --- |\n| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.\n |\n| colour | The arrestee's race; a factor with levels: Black; White. |\n| year | 1997 through 2002; a numeric vector. |\n| age | in years; a numeric vector. |\n| sex | a factor with levels: Female; Male. |\n| employed | a factor with levels: No; Yes. |\n| citizen | a factor with levels: No; Yes. |\n| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. \u2013 6 in all) on which the arrestee's name appeared; a numeric vector |\n\n# Source\nPersonal communication from Michael Friendly, York University.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3238325, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5631796.0, "CurrentDatasourceVersionId": 5707058.0, "ForumId": 3303517, "Type": 2, "CreationDate": "05/08/2023 10:17:21", "LastActivityDate": "05/08/2023", "TotalViews": 8788, "TotalDownloads": 1614, "TotalVotes": 49, "TotalKernels": 14}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import Libraries and Load Dataset
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelBinarizer
# load dataset
data = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
data
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
corr_matrix = data.corr()
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax)
plt.show()
data.describe()
# # Looking for Null values
num_nan = data.isna().sum()
print(num_nan)
data.columns
data = data.drop("Unnamed: 0", axis=1)
data
# # Creating Dummy Variables and Applying Binary Labeling
lb = LabelBinarizer()
data["released_binary"] = lb.fit_transform(data["released"])
data["colour_binary"] = lb.fit_transform(data["colour"])
data["sex_binary"] = lb.fit_transform(data["sex"])
data["employed_binary"] = lb.fit_transform(data["employed"])
data["citizen_binary"] = lb.fit_transform(data["citizen"])
data
# # Getting rid of Categorical Features
data = data.drop(["released", "colour", "sex", "employed", "citizen"], axis=1)
data
corr_matrix = data.corr()
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax)
plt.show()
# # Applying Standard Scaling
scaler = StandardScaler()
scaler.fit(data[["age", "year", "checks"]])
data[["age", "year", "checks"]] = scaler.transform(data[["age", "year", "checks"]])
data
# # Splitting the dataset into Training and Test sets
X_train, X_test, y_train, y_test = train_test_split(
data.drop("released_binary", axis=1),
data["released_binary"],
test_size=0.2,
random_state=42,
)
X_train
y_train
X_test
y_test
# # Training the dataset using Logistic Regression Classifier
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# # Testing on dataset using Logistic Regression Classifier
y_pred = logreg.predict(X_test)
y_pred
accuracy = logreg.score(X_test, y_test)
print(accuracy)
# # Importing Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint
# # Creating a Grid Search to find best hyperparameters for the RF classifier
# Define the range of hyperparameters to tune
param_grid = {
"n_estimators": [50, 100, 150, 200],
"max_depth": [2, 5, 10, 20],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
}
rf = RandomForestClassifier(random_state=42)
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5)
grid_search.fit(X_train, y_train)
print("Best hyperparameters: ", grid_search.best_params_)
# # Training the dataset using RF Classifier and best hyperparameters
rf = RandomForestClassifier(
random_state=42, max_depth=5, min_samples_leaf=1, n_estimators=200
)
rf.fit(X_train, y_train)
# # Predicting results using RF Classifier
y_pred = rf.predict(X_test)
y_pred
from sklearn.metrics import classification_report  # import was missing in the original cell
print(classification_report(y_test, y_pred))
# # Importing Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
# # Creating a Grid Search to find best hyperparameters for the Decision Tree classifier
param_grid = {
"max_depth": [2, 4, 6, 8, 10],
"min_samples_split": [2, 4, 6, 8, 10],
"min_samples_leaf": [1, 2, 3, 4, 5],
"max_features": ["sqrt", "log2", None],
"criterion": ["gini", "entropy"],
}
clf = DecisionTreeClassifier()
grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train, y_train)
# # Finding best hyperparameters for Decision Tree
print("Best Hyperparameters:", grid_search.best_params_)
# # Training the Decision Tree Model using best hyperparameters
best_clf = grid_search.best_estimator_
y_pred = best_clf.predict(X_test)
# # Predicting results using Decision Tree Classifier
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing Gradient Boosting Classifier
from sklearn.ensemble import GradientBoostingClassifier
# # Creating a Grid Search to find best hyperparameters for the GB classifier
param_grid = {
"n_estimators": [50, 100, 150],
"max_depth": [3, 5, 7],
"learning_rate": [0.05, 0.1, 0.2],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
"max_features": [None, "sqrt", "log2"],
}
clf = GradientBoostingClassifier(random_state=42)
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5)
grid_search.fit(X_train, y_train)
# # Finding best hyperparameters for GB Classifier
print("Best Hyperparameters: ", grid_search.best_params_)
print("Best Accuracy Score: {:.2f}%".format(grid_search.best_score_ * 100))
# # Training the GB Classifier using best hyperparameters
best_clf = grid_search.best_estimator_
y_pred = best_clf.predict(X_test)
# # Predicting results using GB Classifier
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing SVM and Training on dataset
from sklearn import svm
clf = svm.SVC(kernel="poly", C=100)
clf.fit(X_train, y_train)
# # Predicting results using SVM
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing Naive Bayes and Training on the Dataset
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train, y_train)
# # Predicting results using Naive Bayes
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # Importing KNN Classifier
from sklearn.neighbors import KNeighborsClassifier
# # Finding the best value for Number of Neighbours for KNN
n_neighbors_list = range(1, 21)
accuracy_list = []
for n in n_neighbors_list:
clf = KNeighborsClassifier(n_neighbors=n)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracy_list.append(accuracy)
# Plot the results of the elbow method
plt.plot(n_neighbors_list, accuracy_list)
plt.title("Elbow Curve for KNN")
plt.xlabel("Number of Neighbors")
plt.ylabel("Accuracy")
plt.show()
# # Training the Dataset with best Hyperparameters of KNN
clf = KNeighborsClassifier(n_neighbors=20)
clf.fit(X_train, y_train)
# # Predicting the results using KNN
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# # And, Finally importing ANN
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# # Model Compilation
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2)
# # ANN Model Prediction
loss, accuracy = model.evaluate(X_test, y_test)
print("Accuracy: {:.2f}%".format(accuracy * 100))
| false | 1 | 2,492 | 3 | 2,816 | 2,492 |
||
129260220
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/indian-food-dataset/data.csv")
df.head()
# **Summary statistics**
# Compute summary statistics for Preparation Time column
df["Preparation Time"].describe()
# Compute summary statistics for Cooking Time column
df["Cooking Time"].describe()
# Compute summary statistics for Total Time column
df["Total Time"].describe()
# Compute summary statistics for Protein per Serving column
df["Protein per Serving"].describe()
# Compute summary statistics for Calories per Serving column
df["Calories per Serving"].describe()
# **Frequency distribution**
# Calculate frequency distribution for Dish Name column
df["Dish Name"].value_counts()
# Calculate frequency distribution for Region column
df["Region"].value_counts()
# Calculate frequency distribution for Spiciness column
df["Spiciness"].value_counts()
# Calculate frequency distribution for Serves column
df["Serves"].value_counts()
# **Histograms**
# Plot histogram for Preparation Time column
import matplotlib.pyplot as plt
plt.hist(df["Preparation Time"])
# Plot histogram for Cooking Time column
plt.hist(df["Cooking Time"], color="red")
# Plot histogram for Total Time column
plt.hist(df["Total Time"], color="orange")
# Plot histogram for Protein per Serving column
plt.hist(df["Protein per Serving"], color="green")
# Plot histogram for Calories per Serving column
plt.hist(df["Calories per Serving"], color="pink")
df.head()
# **Pair plot**
sns.pairplot(df)
# Create a bar plot of the number of dishes by region
plt.bar(df["Region"].unique(), df["Region"].value_counts())
# Create a stacked bar plot of the number of dishes by region and spiciness
df.groupby(["Region", "Spiciness"]).size().unstack().plot(kind="bar", stacked=True)
# Create a pie chart of the proportion of dishes by region
plt.pie(df["Region"].value_counts(), labels=df["Region"].unique())
# Create a pie chart of the proportion of dishes by spiciness
plt.pie(df["Spiciness"].value_counts(), labels=df["Spiciness"].unique())
# Create a scatter plot of Preparation Time vs Calories per Serving
plt.scatter(df["Preparation Time"], df["Calories per Serving"])
# Create a scatter plot of Cooking Time vs Protein per Serving
plt.scatter(df["Cooking Time"], df["Protein per Serving"])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/260/129260220.ipynb
| null | null |
[{"Id": 129260220, "ScriptId": 38306133, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14927546, "CreationDate": "05/12/2023 08:55:47", "VersionNumber": 2.0, "Title": "Indian Food Dataset Notebook", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 113.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 27.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/indian-food-dataset/data.csv")
df.head()
# **Summary statistics**
# Compute summary statistics for Preparation Time column
df["Preparation Time"].describe()
# Compute summary statistics for Cooking Time column
df["Cooking Time"].describe()
# Compute summary statistics for Total Time column
df["Total Time"].describe()
# Compute summary statistics for Protein per Serving column
df["Protein per Serving"].describe()
# Compute summary statistics for Calories per Serving column
df["Calories per Serving"].describe()
# **Frequency distribution**
# Calculate frequency distribution for Dish Name column
df["Dish Name"].value_counts()
# Calculate frequency distribution for Region column
df["Region"].value_counts()
# Calculate frequency distribution for Spiciness column
df["Spiciness"].value_counts()
# Calculate frequency distribution for Serves column
df["Serves"].value_counts()
# **Histograms**
# Plot histogram for Preparation Time column
import matplotlib.pyplot as plt
plt.hist(df["Preparation Time"])
# Plot histogram for Cooking Time column
plt.hist(df["Cooking Time"], color="red")
# Plot histogram for Total Time column
plt.hist(df["Total Time"], color="orange")
# Plot histogram for Protein per Serving column
plt.hist(df["Protein per Serving"], color="green")
# Plot histogram for Calories per Serving column
plt.hist(df["Calories per Serving"], color="pink")
df.head()
# **Pair plot**
sns.pairplot(df)
# Create a bar plot of the number of dishes by region
plt.bar(df["Region"].unique(), df["Region"].value_counts())
# Create a stacked bar plot of the number of dishes by region and spiciness
df.groupby(["Region", "Spiciness"]).size().unstack().plot(kind="bar", stacked=True)
# Create a pie chart of the proportion of dishes by region
plt.pie(df["Region"].value_counts(), labels=df["Region"].unique())
# Create a pie chart of the proportion of dishes by spiciness
plt.pie(df["Spiciness"].value_counts(), labels=df["Spiciness"].unique())
# Create a scatter plot of Preparation Time vs Calories per Serving
plt.scatter(df["Preparation Time"], df["Calories per Serving"])
# Create a scatter plot of Cooking Time vs Protein per Serving
plt.scatter(df["Cooking Time"], df["Protein per Serving"])
| false | 0 | 811 | 0 | 811 | 811 |
||
129260808
|
# # [Attention] はじめに
# **This notebook is simple-baseline for ICR Identifying Age-Related Conditions competition.**
# **You can refer and copy this notebook freely, but this will need a lot of improvement(e.g., use Greeks, feature-engineering, and more).**
# **If you referred or copied this notebook, please vote for this.**
# **Have fun!**
# **このノートブックはシンプルなベースラインです。**
# **参照や複製は自由ですが、多くの改善を必要とするでしょう(Greeksの活用や特徴量エンジニアリングなど)。**
# **もし参照や複製をされた場合は、このノートブックにvoteをお願いします。**
# **楽しんでいきましょう!**
# import libraries
# ライブラリのインポート
import gc
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import log_loss
from catboost import CatBoostClassifier, Pool
import warnings
warnings.simplefilter("ignore")
print("imported.")
# read train-data CSV
# 訓練データCSVの読込
df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
df_train
# **The number of records is 617, so this is very light data.**
# **The target is "Class", and there are 56 features besides "Id" and "Class".**
# **It is better to refer to the Greeks as they are deeply related to the target.**
# **レコード件数は617件で、かなり軽量のデータです。**
# **目的変数は"Class"で、"Id"以外に56個の特徴量があります。**
# **ターゲットに深く関係していると思われるので、Greeksを参照したほうがよさそうです。**
# information of train-data
# 訓練データの基本情報
df_train.info()
# **Almost all features are float type except for "Id" and "Class"; "EJ" is the only categorical feature.**
# **"Id"と"Class"を除いてほぼfloat型の特徴量ですが、"EJ"だけがカテゴリー変数です。**
# features which include Null
isnull = {}
for col in df_train.columns:
k = df_train[col].isnull().sum()
if k > 0:
isnull[col] = k
isnull
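# (Sketch) Express the missing counts above as a share of all rows. No imputation is
# added in this baseline because CatBoost below handles NaNs in numeric features natively.
print(pd.Series(isnull) / len(df_train))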
# statistical information of train-data
# 訓練データの基本統計量
df_train.describe()
# Visualization of "Class"
# "Class"の可視化
print(df_train["Class"].value_counts())
sns.countplot(data=df_train, x="Class")
plt.grid()
# **The ratio of "0" to "1" is approximately 5 to 1.**
# **0と1の割合はおよそ5対1です。**
# Visualization of "EJ"
# "EJ"の可視化
print(df_train["EJ"].value_counts())
sns.countplot(data=df_train, x="EJ")
plt.grid()
# **The ratio of "A" to "B" is approximately 1 to 2.**
# **AとBの割合はおよそ1対2です。**
# simple histogram of train-data
# ヒストグラム表示
bins = 20
# bins = int(math.log2(len(df_train)) + 1)
df_hist = df_train.drop(columns=["Id", "Class"])
fig, axs = plt.subplots(8, 7, figsize=(16, 28))
cnt = 0
for row in range(8):
for col in range(7):
axs[row, col].hist(df_hist.iloc[:, cnt], bins=bins)
axs[row, col].set_title(df_hist.columns[cnt])
cnt += 1
plt.show()
# **We need deeper feature engineering, but here we aim to complete the simple baseline first.**
# **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。**
# correlation between features in train-data
# 特徴量間の相関関係の図示
plt.figure(figsize=(14, 12))
colormap = plt.cm.RdBu
sns.heatmap(
df_train.corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=False,
)
# **We need deeper feature engineering, but here we aim to complete the simple baseline first.**
# **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。**
# read greeks-data CSV
# greeksデータCSVの読込
df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
df_greeks
# information of greeks-data
df_greeks.info()
# detail of "Alpha"
# "Alpha"の詳細
df_greeks["Alpha"].value_counts()
# **"A" is class-0, and "B"/"G"/"D" are class-1. Numbers of "A" and "B"+"G"+"D" are the same as the numbers of 0 and 1 in "Class".**
# **"A"はクラス0で"B"と"G"と"D"はクラス1です。それぞれ、"A"の数と"Class"=0、"B"+"G"+"D"の数と"Class"=1の数は一致します。**
# detail of "Beta"
df_greeks["Beta"].value_counts()
# detail of "Gamma"
df_greeks["Gamma"].value_counts()
# detail of "Epsilon"
df_greeks["Epsilon"].value_counts()
# read test-data CSV
# テストデータCSVの読込
df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
df_test
# **Test data has only 5 records, simple.**
# **テストデータは5件だけです。**
# Information of test-data
# テストデータの基本情報
df_test.info()
# statistical information of test-data
# テストデータの基本統計量
df_test.describe()
# set x/y dataset for train
# 訓練用のx/yデータセットの準備
x_train = df_train.drop(columns=["Id", "Class"])
x_train["EJ"] = x_train["EJ"].astype("category")
y_train = df_train[["Class"]]
print(x_train.shape, y_train.shape)
x_train
# fitting by CatBoost with Stratified K-Fold cross-validation (splits=3)
# CatBoostによる訓練(3分割でのStratified K-Foldによるクロスバリデーション)
# parameter
params = {
"loss_function": "Logloss",
"n_estimators": 2000,
# "learning_rate": 0.03,
"random_state": 45,
# "l2_leaf_reg": 3.0,
# "bootstrap_type": "Bayesian",
# "bagging_temperature": 1.0,
# "subsample": 1.0,
# "random_strength": 1.0,
# "depth": 6,
# "grow_policy": "SymmetricTree", "Deptwise", "Lossguide",
# "grow_policy": "Lossguide",
# "max_leaves": 31,
# "od_type": "Iter",
# "od_wait": 20,
# "border_count": 254,
}
n_splits = 3
cv = list(
StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=45).split(
x_train, y_train
)
)
metrics = []
imp = pd.DataFrame()
for nfold in np.arange(n_splits):
print("-" * 30, "fold:", nfold, "-" * 30)
idx_tr, idx_va = cv[nfold][0], cv[nfold][1]
x_tr, y_tr = x_train.loc[idx_tr, :], y_train.loc[idx_tr, :]
x_va, y_va = x_train.loc[idx_va, :], y_train.loc[idx_va, :]
print("x/y train-data shapes:", x_tr.shape, y_tr.shape)
print("x/y valid-data shapes:", x_va.shape, y_va.shape)
# fitting
model = CatBoostClassifier(**params)
model.fit(
x_tr,
y_tr,
cat_features=["EJ"],
eval_set=[(x_tr, y_tr), (x_va, y_va)],
verbose=100,
early_stopping_rounds=300,
)
# prediction
y_tr_pred = model.predict_proba(x_tr)
y_va_pred = model.predict_proba(x_va)
# set metrics(LogLoss)
metric_tr = log_loss(y_tr, y_tr_pred)
metric_va = log_loss(y_va, y_va_pred)
metrics.append([nfold, metric_tr, metric_va])
# importance of features
_imp = pd.DataFrame(
{
"features": x_train.columns,
"importance": model.feature_importances_,
"nfold": nfold,
}
)
imp = pd.concat([imp, _imp], axis=0, ignore_index=True)
print("-" * 30, "result (LogLoss)", "-" * 30)
metrics = np.array(metrics)
print(
"train-mean-LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 1])),
"valid-mean-LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 2])),
)
print(
"train-std-LogLoss:",
"{:.3f}".format(np.std(metrics[:, 1])),
"valid-std-LogLoss:",
"{:.3f}".format(np.std(metrics[:, 2])),
)
print(
"LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 2]) - np.std(metrics[:, 2])),
"-",
"{:.3f}".format(np.mean(metrics[:, 2]) + np.std(metrics[:, 2])),
)
display(metrics)
imp = imp.groupby("features")["importance"].agg(["mean", "std"])
imp.columns = ["importance", "importance_std"]
imp["importance_cov"] = imp["importance_std"] / imp["importance"]
imp = imp.reset_index(drop=False)
display(imp.sort_values("importance", ascending=False, ignore_index=True))
# set x/id dataset for test
# 予測用のx/idデータセットの準備
x_test = df_test.drop(columns=["Id"])
x_test["EJ"] = x_test["EJ"].astype("category")
id_test = df_test[["Id"]]
print(x_test.shape, id_test.shape)
x_test
# prediction of probability with test-data
# テストデータによる確率の予測
y_test_pred = model.predict_proba(x_test)
y_test_pred
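# Note (sketch): the probabilities above come from the model fitted on the last fold only.
# A common refinement is to keep each fold's model in a list inside the CV loop
# (e.g. models.append(model)) and average their predict_proba outputs here.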
# submission
# 提出用データの整形・CSV出力
sample_sub = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
df_submit = pd.DataFrame(columns=sample_sub.columns)
df_submit["Id"] = id_test["Id"]
df_submit[["class_0", "class_1"]] = y_test_pred
df_submit.to_csv("submission.csv", index=None)
print("completed.")
df_submit
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/260/129260808.ipynb
| null | null |
[{"Id": 129260808, "ScriptId": 38422323, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14597111, "CreationDate": "05/12/2023 09:00:23", "VersionNumber": 1.0, "Title": "ICR_ARC_01-SimpleBaseline(EN/JP)_20230512", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 259.0, "LinesInsertedFromPrevious": 259.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
# # [Attention] はじめに
# **This notebook is simple-baseline for ICR Identifying Age-Related Conditions competition.**
# **You can refer and copy this notebook freely, but this will need a lot of improvement(e.g., use Greeks, feature-engineering, and more).**
# **If you referred or copied this notebook, please vote for this.**
# **Have fun!**
# **このノートブックはシンプルなベースラインです。**
# **参照や複製は自由ですが、多くの改善を必要とするでしょう(Greeksの活用や特徴量エンジニアリングなど)。**
# **もし参照や複製をされた場合は、このノートブックにvoteをお願いします。**
# **楽しんでいきましょう!**
# import libraries
# ライブラリのインポート
import gc
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import log_loss
from catboost import CatBoostClassifier, Pool
import warnings
warnings.simplefilter("ignore")
print("imported.")
# read train-data CSV
# 訓練データCSVの読込
df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
df_train
# **The number of records is 617, so this is very light data.**
# **The target is "Class", and there are 56 features besides "Id" and "Class".**
# **It is better to refer to the Greeks as they are deeply related to the target.**
# **レコード件数は617件で、かなり軽量のデータです。**
# **目的変数は"Class"で、"Id"以外に56個の特徴量があります。**
# **ターゲットに深く関係していると思われるので、Greeksを参照したほうがよさそうです。**
# information of train-data
# 訓練データの基本情報
df_train.info()
# **Almost all features are float type except for "Id" and "Class"; "EJ" is the only categorical feature.**
# **"Id"と"Class"を除いてほぼfloat型の特徴量ですが、"EJ"だけがカテゴリー変数です。**
# features which include Null
isnull = {}
for col in df_train.columns:
k = df_train[col].isnull().sum()
if k > 0:
isnull[col] = k
isnull
# statistical information of train-data
# 訓練データの基本統計量
df_train.describe()
# Visualization of "Class"
# "Class"の可視化
print(df_train["Class"].value_counts())
sns.countplot(data=df_train, x="Class")
plt.grid()
# **The ratio of "0" to "1" is approximately 5 to 1.**
# **0と1の割合はおよそ5対1です。**
# Visualization of "EJ"
# "EJ"の可視化
print(df_train["EJ"].value_counts())
sns.countplot(data=df_train, x="EJ")
plt.grid()
# **The ratio of "A" to "B" is approximately 1 to 2.**
# **AとBの割合はおよそ1対2です。**
# simple histgram of train-data
# ヒストグラム表示
bins = 20
# bins = int(math.log2(len(df_train)) + 1)
df_hist = df_train.drop(columns=["Id", "Class"])
fig, axs = plt.subplots(8, 7, figsize=(16, 28))
cnt = 0
for row in range(8):
for col in range(7):
axs[row, col].hist(df_hist.iloc[:, cnt], bins=bins)
axs[row, col].set_title(df_hist.columns[cnt])
cnt += 1
plt.show()
# **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.**
# **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。**
# correlation between features in train-data
# 特徴量間の相関関係の図示
plt.figure(figsize=(14, 12))
colormap = plt.cm.RdBu
sns.heatmap(
df_train.corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=False,
)
# **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.**
# **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。**
# read greeks-data CSV
# greeksデータCSVの読込
df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
df_greeks
# information of greeks-data
df_greeks.info()
# detail of "Alpha"
# "Alpha"の詳細
df_greeks["Alpha"].value_counts()
# **"A" is class-0, and "B"/"G"/"D" are class-1. Numbers of "A" and "B"+"G"+"D" are the same as the numbers of 0 and 1 in "Class".**
# **"A"はクラス0で"B"と"G"と"D"はクラス1です。それぞれ、"A"の数と"Class"=0、"B"+"G"+"D"の数と"Class"=1の数は一致します。**
# detail of "Beta"
df_greeks["Beta"].value_counts()
# detail of "Gamma"
df_greeks["Gamma"].value_counts()
# detail of "Epsilon"
df_greeks["Epsilon"].value_counts()
# read test-data CSV
# テストデータCSVの読込
df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
df_test
# **Test data has only 5 records, simple.**
# **テストデータは5件だけです。**
# Information of test-data
# テストデータの基本情報
df_test.info()
# statistical information of test-data
# テストデータの基本統計量
df_test.describe()
# set x/y dataset for train
# 訓練用のx/yデータセットの準備
x_train = df_train.drop(columns=["Id", "Class"])
x_train["EJ"] = x_train["EJ"].astype("category")
y_train = df_train[["Class"]]
print(x_train.shape, y_train.shape)
x_train
# fitting by CatBoost with Stratified K-Fold cross-validation (splits=3)
# CatBoostによる訓練(3分割でのStratified K-Foldによるクロスバリデーション)
# parameter
params = {
"loss_function": "Logloss",
"n_estimators": 2000,
# "learning_rate": 0.03,
"random_state": 45,
# "l2_leaf_reg": 3.0,
# "bootstrap_type": "Bayesian",
# "bagging_temperature": 1.0,
# "subsample": 1.0,
# "random_strength": 1.0,
# "depth": 6,
# "grow_policy": "SymmetricTree", "Deptwise", "Lossguide",
# "grow_policy": "Lossguide",
# "max_leaves": 31,
# "od_type": "Iter",
# "od_wait": 20,
# "border_count": 254,
}
n_splits = 3
cv = list(
StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=45).split(
x_train, y_train
)
)
metrics = []
imp = pd.DataFrame()
for nfold in np.arange(n_splits):
print("-" * 30, "fold:", nfold, "-" * 30)
idx_tr, idx_va = cv[nfold][0], cv[nfold][1]
x_tr, y_tr = x_train.loc[idx_tr, :], y_train.loc[idx_tr, :]
x_va, y_va = x_train.loc[idx_va, :], y_train.loc[idx_va, :]
print("x/y train-data shapes:", x_tr.shape, y_tr.shape)
print("x/y valid-data shapes:", x_va.shape, y_va.shape)
# fitting
model = CatBoostClassifier(**params)
model.fit(
x_tr,
y_tr,
cat_features=["EJ"],
eval_set=[(x_tr, y_tr), (x_va, y_va)],
verbose=100,
early_stopping_rounds=300,
)
# prediction
y_tr_pred = model.predict_proba(x_tr)
y_va_pred = model.predict_proba(x_va)
# set metrics(LogLoss)
metric_tr = log_loss(y_tr, y_tr_pred)
metric_va = log_loss(y_va, y_va_pred)
metrics.append([nfold, metric_tr, metric_va])
# importance of features
_imp = pd.DataFrame(
{
"features": x_train.columns,
"importance": model.feature_importances_,
"nfold": nfold,
}
)
imp = pd.concat([imp, _imp], axis=0, ignore_index=True)
print("-" * 30, "result (LogLoss)", "-" * 30)
metrics = np.array(metrics)
print(
"train-mean-LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 1])),
"valid-mean-LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 2])),
)
print(
"train-std-LogLoss:",
"{:.3f}".format(np.std(metrics[:, 1])),
"valid-std-LogLoss:",
"{:.3f}".format(np.std(metrics[:, 2])),
)
print(
"LogLoss:",
"{:.3f}".format(np.mean(metrics[:, 2]) - np.std(metrics[:, 2])),
"-",
"{:.3f}".format(np.mean(metrics[:, 2]) + np.std(metrics[:, 2])),
)
display(metrics)
imp = imp.groupby("features")["importance"].agg(["mean", "std"])
imp.columns = ["importance", "importance_std"]
imp["importance_cov"] = imp["importance_std"] / imp["importance"]
imp = imp.reset_index(drop=False)
display(imp.sort_values("importance", ascending=False, ignore_index=True))
# set x/id dataset for test
# 予測用のx/idデータセットの準備
x_test = df_test.drop(columns=["Id"])
x_test["EJ"] = x_test["EJ"].astype("category")
id_test = df_test[["Id"]]
print(x_test.shape, id_test.shape)
x_test
# prediction of probability with test-data
# テストデータによる確率の予測
y_test_pred = model.predict_proba(x_test)
y_test_pred
# submission
# 提出用データの整形・CSV出力
sample_sub = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
df_submit = pd.DataFrame(columns=sample_sub.columns)
df_submit["Id"] = id_test["Id"]
df_submit[["class_0", "class_1"]] = y_test_pred
df_submit.to_csv("submission.csv", index=None)
print("completed.")
df_submit
| false | 0 | 3,071 | 2 | 3,071 | 3,071 |
||
129383110
|
<jupyter_start><jupyter_text>Breast Cancer Wisconsin (Diagnostic) Data Set
Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
The separating plane in 3-dimensional space was obtained with the linear programming method described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
This database is also available through the UW CS ftp server:
ftp ftp.cs.wisc.edu
cd math-prog/cpo-dataset/machine-learn/WDBC/
Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
Attribute Information:
1) ID number
2) Diagnosis (M = malignant, B = benign)
3-32)
Ten real-valued features are computed for each cell nucleus:
a) radius (mean of distances from center to points on the perimeter)
b) texture (standard deviation of gray-scale values)
c) perimeter
d) area
e) smoothness (local variation in radius lengths)
f) compactness (perimeter^2 / area - 1.0)
g) concavity (severity of concave portions of the contour)
h) concave points (number of concave portions of the contour)
i) symmetry
j) fractal dimension ("coastline approximation" - 1)
The mean, standard error and "worst" or largest (mean of the three
largest values) of these features were computed for each image,
resulting in 30 features. For instance, field 3 is Mean Radius, field
13 is Radius SE, field 23 is Worst Radius.
All feature values are recorded with four significant digits.
Missing attribute values: none
Class distribution: 357 benign, 212 malignant
Kaggle dataset identifier: breast-cancer-wisconsin-data
<jupyter_script># # import libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Data Preprocessing
df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
df.head()
df.shape
df.info()
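# Added sketch: the 30 feature columns follow the mean / SE / "worst" naming scheme
# described in the dataset notes; grouping them by suffix makes that structure explicit.
mean_cols = [c for c in df.columns if c.endswith("_mean")]
se_cols = [c for c in df.columns if c.endswith("_se")]
worst_cols = [c for c in df.columns if c.endswith("_worst")]
print(len(mean_cols), len(se_cols), len(worst_cols))  # expected: 10 10 10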
df.drop(columns="Unnamed: 32", inplace=True)
df["diagnosis"].unique()
df = df.replace({"M": 1, "B": 0})
df.describe().T
plt.figure(figsize=(20, 12))
sns.heatmap(df.drop(columns="id").corr(), annot=True)
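# Added sketch: the full heatmap is dense, so a quick ranking of features by absolute
# correlation with the (already numeric) diagnosis column complements it.
corr_with_target = df.drop(columns="id").corr()["diagnosis"].drop("diagnosis")
print(corr_with_target.abs().sort_values(ascending=False).head(10))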
plt.hist(df["fractal_dimension_mean"])
plt.hist(df["texture_se"])
plt.hist(df["symmetry_se"])
# # Splitting Data
X = df.drop(columns=["id", "diagnosis"])
y = df["diagnosis"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# # Logistic Regression
lr_pipeline = Pipeline(
[("scaler", StandardScaler()), ("logistic_regression", LogisticRegression())]
)
lr_pipeline.fit(X_train, y_train)
lr_pipeline.score(X_test, y_test)
predicted = lr_pipeline.predict(X_test)
truth = y_test
cm = confusion_matrix(truth, predicted)
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Truth")
precision = np.sum(truth & predicted) / np.sum(predicted)
recall = np.sum(truth & predicted) / np.sum(truth)
f1 = 2 * precision * recall / (precision + recall)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
# # Decision Trees
tree_pipeline = Pipeline(
    [("scaler", StandardScaler()), ("decision_tree", DecisionTreeClassifier())]
)
tree_pipeline.fit(X_train, y_train)
tree_pipeline.score(X_test, y_test)
predicted = tree_pipeline.predict(X_test)
truth = y_test
cm = confusion_matrix(truth, predicted)
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Truth")
precision = np.sum(truth & predicted) / np.sum(predicted)
recall = np.sum(truth & predicted) / np.sum(truth)
f1 = 2 * precision * recall / (precision + recall)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
# # Random Forest
rf_pipeline = Pipeline(
[("scaler", StandardScaler()), ("random_forest", RandomForestClassifier())]
)
rf_pipeline.fit(X_train, y_train)
rf_pipeline.score(X_test, y_test)
predicted = rf_pipeline.predict(X_test)
truth = y_test
cm = confusion_matrix(truth, predicted)
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Truth")
precision = np.sum(truth & predicted) / np.sum(predicted)
recall = np.sum(truth & predicted) / np.sum(truth)
f1 = 2 * precision * recall / (precision + recall)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383110.ipynb
|
breast-cancer-wisconsin-data
| null |
[{"Id": 129383110, "ScriptId": 38468518, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10444652, "CreationDate": "05/13/2023 10:17:16", "VersionNumber": 1.0, "Title": "Breast Cancer Prediction", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185380006, "KernelVersionId": 129383110, "SourceDatasetVersionId": 408}]
|
[{"Id": 408, "DatasetId": 180, "DatasourceVersionId": 408, "CreatorUserId": 711301, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "09/25/2016 10:49:04", "VersionNumber": 2.0, "Title": "Breast Cancer Wisconsin (Diagnostic) Data Set", "Slug": "breast-cancer-wisconsin-data", "Subtitle": "Predict whether the cancer is benign or malignant", "Description": "Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. \nn the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: \"Robust Linear Programming Discrimination of Two Linearly Inseparable Sets\", Optimization Methods and Software 1, 1992, 23-34]. \n\nThis database is also available through the UW CS ftp server: \nftp ftp.cs.wisc.edu \ncd math-prog/cpo-dataset/machine-learn/WDBC/\n\nAlso can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29\n\nAttribute Information:\n\n1) ID number \n2) Diagnosis (M = malignant, B = benign) \n3-32) \n\nTen real-valued features are computed for each cell nucleus: \n\na) radius (mean of distances from center to points on the perimeter) \nb) texture (standard deviation of gray-scale values) \nc) perimeter \nd) area \ne) smoothness (local variation in radius lengths) \nf) compactness (perimeter^2 / area - 1.0) \ng) concavity (severity of concave portions of the contour) \nh) concave points (number of concave portions of the contour) \ni) symmetry \nj) fractal dimension (\"coastline approximation\" - 1)\n\nThe mean, standard error and \"worst\" or largest (mean of the three\nlargest values) of these features were computed for each image,\nresulting in 30 features. For instance, field 3 is Mean Radius, field\n13 is Radius SE, field 23 is Worst Radius.\n\nAll feature values are recoded with four significant digits.\n\nMissing attribute values: none\n\nClass distribution: 357 benign, 212 malignant", "VersionNotes": "This updated dataset has column names added", "TotalCompressedBytes": 125204.0, "TotalUncompressedBytes": 125204.0}]
|
[{"Id": 180, "CreatorUserId": 711301, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 408.0, "CurrentDatasourceVersionId": 408.0, "ForumId": 1547, "Type": 2, "CreationDate": "09/19/2016 20:27:05", "LastActivityDate": "02/06/2018", "TotalViews": 1744898, "TotalDownloads": 301790, "TotalVotes": 3191, "TotalKernels": 2628}]
| null |
| false | 0 | 1,099 | 2 | 1,624 | 1,099 |
||
129383655
|
<jupyter_start><jupyter_text>Starbucks Nutrition Facts
```
Nutrition facts for several Starbucks food items
```
| Column | Description |
| ------- | ------------------------------------------------------------ |
| item | The name of the food item. |
| calories| The amount of calories in the food item. |
| fat | The quantity of fat in grams present in the food item. |
| carb | The amount of carbohydrates in grams found in the food item. |
| fiber | The quantity of dietary fiber in grams in the food item. |
| protein | The amount of protein in grams contained in the food item. |
| type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |
Kaggle dataset identifier: starbucks-nutrition
<jupyter_script># # StarBucks Menu Nutrition Facts Exploratory Data Analysis
# ## Starbucks Intro
# Starbucks, the renowned coffeehouse chain founded in 1971, has become a global icon with over 33,000 stores in 80 countries. With a staggering 4 billion cups of coffee sold in 2020, equivalent to 11 million cups per day, Starbucks has established itself as a leader in the coffee industry.
# ### Dataset Details
# This Dataset contains information about the nutrition facts of items in the Menu, such as fat, carb, protein, fiber and calories per serving.
# The items are also categorized by type as well.
# ## Objectives of Data Analysis
# 1. Find the items with maximum and minimum carb, fat, protein, fiber and calories.
# 2. Relationship of Calories with Carbs, Fat, Protein and Fiber
# 3. Distribution of Nutritional contents in the Menu
# 4. Analysing the nutritional content by type of Items in the Menu.
# ### Importing Libraries
# importing libraries
import numpy as np
import plotly.express as px
import pandas as pd
import plotly.graph_objects as go
import plotly.subplots as sp
# ### Importing Dataset
# importing dataset
data_df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv")
data_df.head()
data_df.shape # no of rows and columns
data_df.info() # any null values
# dropping unwanted column
data_df = data_df.drop("Unnamed: 0", axis=1)
# descriptive stats
data_df.describe()
print(f"Duplicated values: {data_df.duplicated().sum()}") # checking duplicated values
data_df.head()
# ### Analysing data by type of items in the Menu
type_df = data_df.groupby("type").agg(
{
"item": pd.Series.count,
"calories": pd.Series.mean,
"fat": pd.Series.mean,
"carb": pd.Series.mean,
"fiber": pd.Series.mean,
"protein": pd.Series.mean,
}
)
type_df
pie = px.pie(
type_df,
labels=type_df.index,
values=type_df.item,
title="Percentage by type of items in Starbucks Menu",
names=type_df.index,
)
pie.update_traces(
textposition="outside",
textfont_size=15,
textinfo="percent + label",
showlegend=False,
)
pie.show()
# We can clearly see that the majority of the items in the Menu belong to the bakery type, which makes sense because bakery items are mostly consumed with coffee.
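# Added sketch: the same breakdown as the pie chart above, as plain percentages.
print(data_df["type"].value_counts(normalize=True).mul(100).round(1))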
fig = go.Figure()
fig.add_bar(x=type_df.index, y=type_df.fat, name="Fat Content")
fig.add_bar(x=type_df.index, y=type_df.protein, name="Protein Content")
fig.add_bar(x=type_df.index, y=type_df.carb, name="Carb Content")
fig.add_bar(x=type_df.index, y=type_df.fiber, name="Fiber Content")
fig.update_layout(barmode="group")
fig.update_layout(
title="Average Calories, Fat, Protein, Carb and fiber content of items in Menu by Type",
xaxis_title="Type",
yaxis_title="Quantity",
)
fig.show()
type_df = type_df.sort_values(by="calories", ascending=True)
h_bar = px.bar(
x=type_df.calories,
y=type_df.index,
orientation="h",
color=type_df.calories,
color_continuous_scale="reds",
title="Average Calories in Menu Items by Type",
)
h_bar.update_layout(xaxis_title="Calories", yaxis_title="Type")
h_bar.show()
# ### Distribution of Nutritional Content in the Menu
import plotly.subplots as sp
import plotly.graph_objects as go
# Create subplots
fig = sp.make_subplots(rows=3, cols=2)
# Add histogram traces to subplots
fig.add_trace(
go.Histogram(x=data_df.calories, name="Calories", nbinsx=20), row=1, col=1
)
fig.add_trace(
go.Histogram(x=data_df.fat, name="Fat Content", nbinsx=10),
row=1,
col=2,
)
fig.add_trace(
go.Histogram(x=data_df.protein, name="Protein Content", nbinsx=10),
row=2,
col=1,
)
fig.add_trace(
go.Histogram(x=data_df.carb, name="Carbs Content"),
row=2,
col=2,
)
fig.add_trace(
go.Histogram(x=data_df.fiber, name="Fiber Content"),
row=3,
col=1,
)
# Customize the layout
fig.update_layout(
title="Distribution of Calories, Fat, Carb, Protein and Fiber Content",
xaxis_title="Values",
yaxis_title="Frequency",
autosize=False,
width=1000,
height=700,
)
# Display the plot
fig.show()
# ### Relationship of Calories with other nutritional content
# Create the subplots
fig = sp.make_subplots(
rows=2,
cols=2,
subplot_titles=(
"Fat vs Calories",
"Protein vs Calories",
"Carb vs Calories",
"Fiber vs Calories",
),
)
fig.add_trace(
go.Scatter(x=data_df.fat, y=data_df.calories, name="fat", mode="markers"),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(x=data_df.protein, y=data_df.calories, name="protein", mode="markers"),
row=1,
col=2,
)
fig.add_trace(
go.Scatter(
x=data_df.carb,
y=data_df.calories,
name="carbs",
mode="markers",
),
row=2,
col=1,
)
fig.add_trace(
go.Scatter(
x=data_df.fiber,
y=data_df.calories,
name="fiber",
mode="markers",
),
row=2,
col=2,
)
# Add chart title
fig.update_layout(
title="Relationship between carb, fat, proetin, fiber and Calories",
autosize=False,
width=1000,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="fat", row=1, col=1)
fig.update_xaxes(title_text="protein", row=1, col=2)
fig.update_xaxes(title_text="carb", row=2, col=1)
fig.update_xaxes(title_text="fiber", row=2, col=2)
fig.update_yaxes(title_text="calories", row=1, col=1)
fig.update_yaxes(title_text="calories", row=1, col=2)
fig.update_yaxes(title_text="calories", row=2, col=1)
fig.update_yaxes(title_text="calories", row=2, col=2)
# Display the chart
fig.show()
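# Added sketch: quantify the scatter plots above with Pearson correlations against calories.
nutrient_cols = ["fat", "carb", "fiber", "protein"]
print(
    data_df[nutrient_cols + ["calories"]]
    .corr()["calories"]
    .drop("calories")
    .sort_values(ascending=False)
)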
# ### Finding Items with Maximum and Minimum Nutritional Components
# ### Top 10 items with Minimum and Maximum Calories
cal_max = data_df.sort_values(by=["calories"], ascending=True)[-10:]
cal_min = data_df.sort_values(by=["calories"], ascending=False)[-10:]
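# Added sketch: pandas nlargest/nsmallest give the same extremes more directly
# (the *_alt names are new and not used by the plots below).
cal_max_alt = data_df.nlargest(10, "calories")
cal_min_alt = data_df.nsmallest(10, "calories")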
# Create the subplots
fig = sp.make_subplots(
rows=1,
cols=2,
subplot_titles=(
"Top 10 Menu item with highest calories",
"Top 10 Menu item with lowest calories",
),
)
fig.add_trace(
go.Bar(
x=cal_max.item,
y=cal_max.calories,
name="Max Calories",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=cal_min.item,
y=cal_min.calories,
name="Min Calories",
),
row=1,
col=2,
)
# Add chart title
fig.update_layout(
title="Top 10 Menu Items With Highest & Lowest Calories",
autosize=False,
width=1200,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="items", row=1, col=1)
fig.update_xaxes(title_text="items", row=1, col=2)
fig.update_yaxes(title_text="calories", row=1, col=1)
fig.update_yaxes(title_text="calories", row=1, col=2)
# Display the chart
fig.show()
# ### Top 10 items with Minimum and Maximum Proteins
pro_max = data_df.sort_values(by="protein", ascending=True)[-10:]
pro_min = data_df.sort_values(by="protein", ascending=False)[-10:]
# Create the subplots
fig = sp.make_subplots(
rows=1,
cols=2,
subplot_titles=(
"Top 10 Menu item with highest Proteins",
"Top 10 Menu item with lowest Proteins",
),
)
fig.add_trace(
go.Bar(
x=pro_max.item,
y=pro_max.protein,
name="Max Protein content",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=pro_min.item,
y=pro_min.protein,
name="Min Protein content",
),
row=1,
col=2,
)
# Add chart title
fig.update_layout(
title="Top 10 Menu Items With Highest & Lowest Proteins",
autosize=False,
width=1200,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="items", row=1, col=1)
fig.update_xaxes(title_text="items", row=1, col=2)
fig.update_yaxes(title_text="Protein", row=1, col=1)
fig.update_yaxes(title_text="Protein", row=1, col=2)
# Display the chart
fig.show()
# ### Top 10 items with Minimum and Maximum Fat
fat_max = data_df.sort_values(by="fat", ascending=True)[-10:]
fat_min = data_df.sort_values(by="fat", ascending=False)[-10:]
# Create the subplots
fig = sp.make_subplots(
rows=1,
cols=2,
subplot_titles=(
"Top 10 Menu item with highest Fat",
"Top 10 Menu item with lowest Fat",
),
)
fig.add_trace(
go.Bar(
x=fat_max.item,
y=fat_max.fat,
name="Max Fat content",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=fat_min.item,
y=fat_min.fat,
name="Min Fat content",
),
row=1,
col=2,
)
# Add chart title
fig.update_layout(
title="Top 10 Menu Items With Highest & Lowest Fat",
autosize=False,
width=1200,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="items", row=1, col=1)
fig.update_xaxes(title_text="items", row=1, col=2)
fig.update_yaxes(title_text="Fat", row=1, col=1)
fig.update_yaxes(title_text="Fat", row=1, col=2)
# Display the chart
fig.show()
# ### Top 10 items with Minimum and Maximum Fiber
fiber_max = data_df.sort_values(by="fiber", ascending=True)[-10:]
fiber_min = data_df.sort_values(by="fiber", ascending=False)[-10:]
# Create the subplots
fig = sp.make_subplots(
rows=1,
cols=2,
subplot_titles=(
"Top 10 Menu item with highest Fiber",
"Top 10 Menu item with lowest Fiber",
),
)
fig.add_trace(
go.Bar(
x=fiber_max.item,
y=fiber_max.fiber,
name="Max Fiber content",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=fiber_min.item,
y=fiber_min.fiber,
name="Min Fiber content",
),
row=1,
col=2,
)
# Add chart title
fig.update_layout(
title="Top 10 Menu Items With Highest & Lowest Fiber",
autosize=False,
width=1200,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="items", row=1, col=1)
fig.update_xaxes(title_text="items", row=1, col=2)
fig.update_yaxes(title_text="Fiber", row=1, col=1)
fig.update_yaxes(title_text="Fiber", row=1, col=2)
# Display the chart
fig.show()
# ### Top 10 items with Minimum and Maximum Carbs
carb_max = data_df.sort_values(by="carb", ascending=True)[-10:]
carb_min = data_df.sort_values(by="carb", ascending=False)[-10:]
# Create the subplots
fig = sp.make_subplots(
rows=1,
cols=2,
subplot_titles=(
"Top 10 Menu item with highest Carbs",
"Top 10 Menu item with lowest Carbs",
),
)
fig.add_trace(
go.Bar(
x=carb_max.item,
y=carb_max.carb,
name="Max Carbs content",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=carb_min.item,
y=carb_min.carb,
name="Min Carbs content",
),
row=1,
col=2,
)
# Add chart title
fig.update_layout(
title="Top 10 Menu Items With Highest & Lowest Carbs",
autosize=False,
width=1200,
height=700,
)
# Update x and y axis titles for each subplot
fig.update_xaxes(title_text="items", row=1, col=1)
fig.update_xaxes(title_text="items", row=1, col=2)
fig.update_yaxes(title_text="Carbs", row=1, col=1)
fig.update_yaxes(title_text="Carbs", row=1, col=2)
# Display the chart
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383655.ipynb
|
starbucks-nutrition
|
utkarshx27
|
[{"Id": 129383655, "ScriptId": 38404114, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10493034, "CreationDate": "05/13/2023 10:23:20", "VersionNumber": 1.0, "Title": "Star Bucks Menu Nutrition Facts EDA", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 353.0, "LinesInsertedFromPrevious": 353.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185380982, "KernelVersionId": 129383655, "SourceDatasetVersionId": 5651811}]
|
[{"Id": 5651811, "DatasetId": 3248696, "DatasourceVersionId": 5727183, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:42:59", "VersionNumber": 1.0, "Title": "Starbucks Nutrition Facts", "Slug": "starbucks-nutrition", "Subtitle": "Nutrition facts for several Starbucks food items", "Description": "```\nNutrition facts for several Starbucks food items\n```\n| Column | Description |\n| ------- | ------------------------------------------------------------ |\n| item | The name of the food item. |\n| calories| The amount of calories in the food item. |\n| fat | The quantity of fat in grams present in the food item. |\n| carb | The amount of carbohydrates in grams found in the food item. |\n| fiber | The quantity of dietary fiber in grams in the food item. |\n| protein | The amount of protein in grams contained in the food item. |\n| type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3248696, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651811.0, "CurrentDatasourceVersionId": 5727183.0, "ForumId": 3314049, "Type": 2, "CreationDate": "05/10/2023 05:42:59", "LastActivityDate": "05/10/2023", "TotalViews": 12557, "TotalDownloads": 2321, "TotalVotes": 59, "TotalKernels": 17}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
| false | 1 | 3,886 | 2 | 4,101 | 3,886 |
||
129383348
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import LabelEncoder
# from sklearn_pandas import CategoricalImputer
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_dir = "/kaggle/input/loan-status-binary-classification/"
train = pd.read_csv(data_dir + "train.csv")
test = pd.read_csv(data_dir + "test.csv")
print(train.shape)
print(train.head())
print("==============")
print(test.shape)
print(test.head())
train["Gender"].value_counts(dropna=False)
test["Gender"].value_counts(dropna=False)
train["Gender"].fillna("Third sex", inplace=True)
test["Gender"].fillna("Third sex", inplace=True)
train_1 = pd.DataFrame(train)
dummies = pd.get_dummies(train_1["Gender"], prefix="Gender")
train_1 = pd.concat([train_1, dummies], axis=1)
train_1 = train_1.drop(["Gender"], axis=1)
print(train_1)
test_1 = pd.DataFrame(test)
dummies = pd.get_dummies(test_1["Gender"], prefix="Gender")
test_1 = pd.concat([test_1, dummies], axis=1)
test_1 = test_1.drop(["Gender"], axis=1)
print(test_1)
train_1["Married"].value_counts(dropna=False)
test_1["Married"].value_counts(dropna=False)
train_1["Married"].fillna("Yes", inplace=True)
test_1["Married"].fillna("Yes", inplace=True)
train_1["Married"] = train_1["Married"].map({"Yes": 1, "No": 0})
test_1["Married"] = test_1["Married"].map({"Yes": 1, "No": 0})
print(train_1)
print(test_1)
train_1["Dependents"].value_counts(dropna=False)
test_1["Dependents"].value_counts(dropna=False)
train_1["Dependents"].fillna("0", inplace=True)
test_1["Dependents"].fillna("0", inplace=True)
encoder = LabelEncoder()
train_1["Dependents"] = encoder.fit_transform(train_1["Dependents"])
print(train_1)
# reuse the encoder fitted on the training data so train and test share the same label mapping
test_1["Dependents"] = encoder.transform(test_1["Dependents"])
print(test_1)
train_1["Education"].value_counts(dropna=False)
test_1["Education"].value_counts(dropna=False)
train_1["Education"] = train_1["Education"].map({"Graduate": 1, "Not Graduate": 0})
test_1["Education"] = test_1["Education"].map({"Graduate": 1, "Not Graduate": 0})
train_1["Self_Employed"].value_counts(dropna=False)
test_1["Self_Employed"].value_counts(dropna=False)
train_1["Self_Employed"].fillna("No", inplace=True)
test_1["Self_Employed"].fillna("No", inplace=True)
train_1["Self_Employed"] = train_1["Self_Employed"].map({"Yes": 1, "No": 0})
test_1["Self_Employed"] = test_1["Self_Employed"].map({"Yes": 1, "No": 0})
print(train_1)
train_1["Applicant_Income"].value_counts(dropna=False)
train_1["Coapplicant_Income"].value_counts(dropna=False)
train_1["Loan_Amount"].value_counts(dropna=False)
train_1["Term"].value_counts(dropna=False)
mean_term = train_1["Term"].mean()
train_1["Term"].fillna(mean_term, inplace=True)
test_1["Term"].fillna(mean_term, inplace=True)
train["Credit_History"].value_counts(dropna=False)
train_1["Credit_History"] = train["Credit_History"]
test_1["Credit_History"] = test["Credit_History"]
train_1["Credit_History"].fillna(1.0, inplace=True)
test_1["Credit_History"].fillna(1.0, inplace=True)
print(train_1)
train_1["Area"].value_counts(dropna=False)
train_end = pd.DataFrame(train_1)
dummies = pd.get_dummies(train_end["Area"], prefix="Area")
train_end = pd.concat([train_end, dummies], axis=1)
train_end = train_end.drop(["Area"], axis=1)
print(train_end)
test_end = pd.DataFrame(test_1)
dummies = pd.get_dummies(test_end["Area"], prefix="Area")
test_end = pd.concat([test_end, dummies], axis=1)
test_end = test_end.drop(["Area"], axis=1)
print(test_end)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(train_end.drop(["id", "Status"], axis=1), train_end["Status"])
preds = clf.predict(test_end.drop("id", axis=1))
sub = pd.DataFrame({"id": test_end["id"], "Status": preds})
# Write the submission dataframe to a csv file
sub.to_csv("/kaggle/working/submission.csv", index=False)
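# Added sketch: an optional sanity check of the model before submitting, using
# 5-fold cross-validated accuracy on the training set.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(
    LogisticRegression(max_iter=1000),
    train_end.drop(["id", "Status"], axis=1),
    train_end["Status"],
    cv=5,
)
print("CV accuracy: mean=%.3f, std=%.3f" % (cv_scores.mean(), cv_scores.std()))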
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383348.ipynb
| null | null |
[{"Id": 129383348, "ScriptId": 38435961, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15037956, "CreationDate": "05/13/2023 10:19:50", "VersionNumber": 3.0, "Title": "loan-baseline2222", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 157.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 75.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,507 | 0 | 1,507 | 1,507 |
||
129217578
|
# 
import os, glob
import pandas as pd
import matplotlib.pyplot as plt
DATASET_FOLDER = (
"/kaggle/input/google-research-identify-contrails-reduce-global-warming"
)
path_json = os.path.join(DATASET_FOLDER, "train_metadata.json")
df_train = pd.read_json(path_json)
display(df_train.head())
from pprint import pprint
pprint(df_train.iloc[0]["projection_wkt"])
# ## Show raw image
# - **band_{08-16}.npy:** array with size of H x W x T, where T = n_times_before + n_times_after + 1, representing the number of images in the sequence. There are n_times_before and n_times_after images before and after the labeled frame respectively. In our dataset all examples have n_times_before=4 and n_times_after=3. Each band represents an infrared channel at different wavelengths and is converted to brightness temperatures based on the calibration parameters. The number in the filename corresponds to the GOES-16 ABI band number. Details of the ABI bands can be found here.
# - **human_individual_masks.npy:** array with size of H x W x 1 x R. Each example is labeled by R individual human labelers. R is not the same for all samples. The labeled masks have value either 0 or 1 and correspond to the (n_times_before+1)-th image in band_{08-16}.npy. They are available only in the training set.
# - **human_pixel_masks.npy:** array with size of H x W x 1 containing the binary ground truth. A pixel is regarded as contrail pixel in evaluation if it is labeled as contrail by more than half of the labelers.
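# Added sketch: a minimal check of the shapes described above for one training example
# (the example id is the same one visualized later in this notebook).
import os
import numpy as np
_sample_dir = os.path.join(DATASET_FOLDER, "train", "1000823728928031783")
print(np.load(os.path.join(_sample_dir, "band_08.npy")).shape)  # (H, W, T), T = 4 + 3 + 1 = 8
print(np.load(os.path.join(_sample_dir, "human_pixel_masks.npy")).shape)  # (H, W, 1)
print(np.load(os.path.join(_sample_dir, "human_individual_masks.npy")).shape)  # (H, W, 1, R)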
import numpy as np
def show_sample(spl_id):
folder = os.path.join(DATASET_FOLDER, "train", spl_id)
fig, axarr = plt.subplots(ncols=8, nrows=9, figsize=(16, 14))
for i, bi in enumerate(range(8, 17)):
img = np.load(os.path.join(folder, f"band_{bi:02}.npy"))
print(img.shape, img.min(), img.max())
for j in range(8):
axarr[i, j].imshow(img[:, :, j])
for n in ("human_individual_masks", "human_pixel_masks"):
mask = np.load(os.path.join(folder, f"{n}.npy"))
print(mask.shape, mask.min(), mask.max())
show_sample("1000823728928031783")
# ## Convert to RGB
# **Following example from: https://www.kaggle.com/code/inversion/visualizing-contrails**
_T11_BOUNDS = (243, 303)
_CLOUD_TOP_TDIFF_BOUNDS = (-4, 5)
_TDIFF_BOUNDS = (-4, 2)
def normalize_range(data, bounds):
"""Maps data to the range [0, 1]."""
return (data - bounds[0]) / (bounds[1] - bounds[0])
def show_rgb_sample(spl_id, t=4):
folder = os.path.join(DATASET_FOLDER, "train", spl_id)
bands = [None] * 17
for bi in range(8, 17):
bands[bi] = np.load(os.path.join(folder, f"band_{bi:02}.npy"))
# print(bands[bi].shape, bands[bi].min(), bands[bi].max())
r = normalize_range(bands[15] - bands[14], _TDIFF_BOUNDS)
g = normalize_range(bands[14] - bands[11], _CLOUD_TOP_TDIFF_BOUNDS)
b = normalize_range(bands[14], _T11_BOUNDS)
img = np.clip(np.stack([r, g, b], axis=2), 0, 1)
mask = np.load(os.path.join(folder, "human_pixel_masks.npy"))
# print(mask.shape, mask.min(), mask.max())
fig, axarr = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
axarr[0, 0].imshow(img[..., t])
axarr[0, 1].imshow(mask[..., 0], interpolation="none")
axarr[1, 0].imshow(img[..., t])
axarr[1, 0].contour(mask[..., 0], linewidths=1, colors="red")
show_rgb_sample("1000823728928031783")
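# Added sketch: the binary ground truth is a majority vote over R labelers, so the
# per-pixel fraction of labelers marking a contrail shows where annotators disagree.
folder = os.path.join(DATASET_FOLDER, "train", "1000823728928031783")
indiv = np.load(os.path.join(folder, "human_individual_masks.npy"))  # (H, W, 1, R)
agreement = indiv[..., 0, :].mean(axis=-1)  # fraction of labelers per pixel
plt.figure(figsize=(5, 5))
plt.imshow(agreement)
plt.colorbar()
plt.title("Labeler agreement")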
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/217/129217578.ipynb
| null | null |
[{"Id": 129217578, "ScriptId": 38390791, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5069700, "CreationDate": "05/11/2023 23:46:51", "VersionNumber": 2.0, "Title": "\u2708\ufe0fContrails: EDA \ud83e\uddd1\u200d\u2708\ufe0f & interactive \ud83d\udd0e viewer", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 35.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 36.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
| null | null | null | null |
# 
import os, glob
import pandas as pd
import matplotlib.pyplot as plt
DATASET_FOLDER = (
"/kaggle/input/google-research-identify-contrails-reduce-global-warming"
)
path_json = os.path.join(DATASET_FOLDER, "train_metadata.json")
df_train = pd.read_json(path_json)
display(df_train.head())
from pprint import pprint
pprint(df_train.iloc[0]["projection_wkt"])
# ## Show raw image
# - **band_{08-16}.npy:** array with size of H x W x T, where T = n_times_before + n_times_after + 1, representing the number of images in the sequence. There are n_times_before and n_times_after images before and after the labeled frame respectively. In our dataset all examples have n_times_before=4 and n_times_after=3. Each band represents an infrared channel at different wavelengths and is converted to brightness temperatures based on the calibration parameters. The number in the filename corresponds to the GOES-16 ABI band number. Details of the ABI bands can be found here.
# - **human_individual_masks.npy:** array with size of H x W x 1 x R. Each example is labeled by R individual human labelers. R is not the same for all samples. The labeled masks have value either 0 or 1 and correspond to the (n_times_before+1)-th image in band_{08-16}.npy. They are available only in the training set.
# - **human_pixel_masks.npy:** array with size of H x W x 1 containing the binary ground truth. A pixel is regarded as contrail pixel in evaluation if it is labeled as contrail by more than half of the labelers.
import numpy as np
def show_sample(spl_id):
folder = os.path.join(DATASET_FOLDER, "train", spl_id)
fig, axarr = plt.subplots(ncols=8, nrows=9, figsize=(16, 14))
for i, bi in enumerate(range(8, 17)):
img = np.load(os.path.join(folder, f"band_{bi:02}.npy"))
print(img.shape, img.min(), img.max())
for j in range(8):
axarr[i, j].imshow(img[:, :, j])
for n in ("human_individual_masks", "human_pixel_masks"):
mask = np.load(os.path.join(folder, f"{n}.npy"))
print(mask.shape, mask.min(), mask.max())
show_sample("1000823728928031783")
# ## Conver to RGB
# **Following example from: https://www.kaggle.com/code/inversion/visualizing-contrails**
_T11_BOUNDS = (243, 303)
_CLOUD_TOP_TDIFF_BOUNDS = (-4, 5)
_TDIFF_BOUNDS = (-4, 2)
def normalize_range(data, bounds):
"""Maps data to the range [0, 1]."""
return (data - bounds[0]) / (bounds[1] - bounds[0])
def show_rgb_sample(spl_id, t=4):
folder = os.path.join(DATASET_FOLDER, "train", spl_id)
bands = [None] * 17
for bi in range(8, 17):
bands[bi] = np.load(os.path.join(folder, f"band_{bi:02}.npy"))
# print(bands[bi].shape, bands[bi].min(), bands[bi].max())
r = normalize_range(bands[15] - bands[14], _TDIFF_BOUNDS)
g = normalize_range(bands[14] - bands[11], _CLOUD_TOP_TDIFF_BOUNDS)
b = normalize_range(bands[14], _T11_BOUNDS)
img = np.clip(np.stack([r, g, b], axis=2), 0, 1)
mask = np.load(os.path.join(folder, "human_pixel_masks.npy"))
# print(mask.shape, mask.min(), mask.max())
fig, axarr = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
axarr[0, 0].imshow(img[..., t])
axarr[0, 1].imshow(mask[..., 0], interpolation="none")
axarr[1, 0].imshow(img[..., t])
axarr[1, 0].contour(mask[..., 0], linewidths=1, colors="red")
show_rgb_sample("1000823728928031783")
| false | 0 | 1,187 | 7 | 1,187 | 1,187 |
||
129650672
|
# # Kagle Intro
# * Please register on kaggle. Use your Levi9 email. If already registered, you can use your account, but identify yourself for org team
# * **Important:** confirm your phone number. Otherwise you will not have access to some features like Internet access from Notebook or GPU
# * You have 30 free GPU hours per week per user. Quota available in your profile.
# ## Notebook configuration
# * Enabling internet access - on the right, switch the Internet (available only after phone confirmation)
# * Enabling GPU - choose accelerator from the options
# ## Istalling additional libraries
# ## Working with the data
# * competition data
# * adding own dataset - create a dataset and add it to competition (https://www.kaggle.com/datasets)
import torchvision.datasets as dset
import torchvision.transforms as transforms
path2data = "/kaggle/input/levi9-hack9-2023/train"
path2json = "/kaggle/input/levi9-hack9-2023/train.json"
coco_train = dset.CocoDetection(
root=path2data, annFile=path2json, transform=transforms.ToTensor()
)
print("Number of samples: ", len(coco_train))
img, target = coco_train[0]
print(img.size)
print(target)
# ## Notebook Sharing
# Privately with your teammates or make it public
# ## Working with the secrets
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
AWS_ACCESS_KEY_ID = user_secrets.get_secret("AWS_ACCESS_KEY_ID")
# ## Submission
# On the right, there is output folder
import pandas as pd
submission = [
{"image_id": "test", "prediction": "test2"},
{"image_id": "test22", "prediction": "sample"},
]
df = pd.DataFrame.from_dict(submission)
df.to_csv("submission2.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650672.ipynb
| null | null |
[{"Id": 129650672, "ScriptId": 38433042, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1904452, "CreationDate": "05/15/2023 13:37:45", "VersionNumber": 2.0, "Title": "Kaggle intro", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 23.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 28.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Kagle Intro
# * Please register on kaggle. Use your Levi9 email. If already registered, you can use your account, but identify yourself for org team
# * **Important:** confirm your phone number. Otherwise you will not have access to some features like Internet access from Notebook or GPU
# * You have 30 free GPU hours per week per user. Quota available in your profile.
# ## Notebook configuration
# * Enabling internet access - on the right, switch the Internet (available only after phone confirmation)
# * Enabling GPU - choose accelerator from the options
# ## Istalling additional libraries
# ## Working with the data
# * competition data
# * adding own dataset - create a dataset and add it to competition (https://www.kaggle.com/datasets)
import torchvision.datasets as dset
import torchvision.transforms as transforms
path2data = "/kaggle/input/levi9-hack9-2023/train"
path2json = "/kaggle/input/levi9-hack9-2023/train.json"
coco_train = dset.CocoDetection(
root=path2data, annFile=path2json, transform=transforms.ToTensor()
)
print("Number of samples: ", len(coco_train))
img, target = coco_train[0]
print(img.size)
print(target)
# ## Notebook Sharing
# Privately with your teammates or make it public
# ## Working with the secrets
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
AWS_ACCESS_KEY_ID = user_secrets.get_secret("AWS_ACCESS_KEY_ID")
# ## Submission
# On the right, there is output folder
import pandas as pd
submission = [
{"image_id": "test", "prediction": "test2"},
{"image_id": "test22", "prediction": "sample"},
]
df = pd.DataFrame.from_dict(submission)
df.to_csv("submission2.csv", index=False)
| false | 0 | 477 | 0 | 477 | 477 |
||
129650266
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from IPython.display import display
import base64
import string
import re
from collections import Counter
from time import time
# from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stopwords
from nltk.corpus import stopwords
from sklearn.metrics import log_loss
stopwords = stopwords.words("english")
sns.set_context("notebook")
print("Numpy version:", np.__version__)
print("Pandas version:", pd.__version__)
print("Seaborn version:", sns.__version__)
# **Data Visualization**
train = pd.read_csv("/kaggle/working/train.csv")
train.head()
train.index
train = pd.read_csv(
"/kaggle/working/train.csv", skiprows=1, names=["ID", "Text", "Author"]
).set_index("ID")
test = pd.read_csv(
"/kaggle/working/test.csv", skiprows=1, names=["ID", "Text"]
).set_index("ID")
# Delete the word 'id' from the ID columns
train.index = [id[2:] for id in train.index]
test.index = [id[2:] for id in test.index]
train.sort_index(inplace=True)
test.sort_index(inplace=True)
train["Text"][0]
display(train.head())
display(test.head())
print("Training sample:", train["Text"][0])
print("Author of sample:", train["Author"][0])
print("Training Data Shape:", train.shape)
print("Testing Data Shape:", test.shape)
print("Training Dataset Info:")
display(train.info())
print("Test Dataset Info:")
display(test.info())
# **Plot**
sns.barplot(
x=["Edgar Allen Poe", "Mary Wollstonecraft Shelley", "H.P. Lovecraft"],
y=train["Author"].value_counts(),
)
plt.show()
# **Create Spooky Wordcloud**
font_64_decode = base64.decodebytes(font_64_encode)
font_result = open(
"spooky_font.ttf", "wb"
) # create a writable font file and write the decoding result
font_result.write(font_64_decode)
font_result.close()
all_text = " ".join([text for text in train["Text"]])
print("Number of words in all_text:", len(all_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(all_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for Edgar Allen Poe**
[train["Author"] == "EAP"]
eap = train[train["Author"] == "EAP"]
eap_text = " ".join(text for text in eap["Text"])
print("Number of words in eap_text:", len(eap_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(eap_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for Mary Wollstonecraft Shelley**
train[train["Author"] == "MWS"]
mws = train[train["Author"] == "MWS"]
mws_text = " ".join(text for text in mws["Text"])
print("Number of words in eap_text:", len(mws_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(mws_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for H.P. Lovecraft**
train[train["Author"] == "HPL"]
hpl = train[train["Author"] == "HPL"]
hpl_text = " ".join(text for text in hpl["Text"])
print("Number of words in eap_text:", len(hpl_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(hpl_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Text Preprocessing**
import spacy
nlp = spacy.load("en_core_web_sm")
punctuations = string.punctuation
def cleanup_text(docs, logging=False):
texts = []
counter = 1
for doc in docs:
if counter % 1000 == 0 and logging:
print("Processed %d out of %d documents." % (counter, len(docs)))
counter += 1
doc = nlp(doc, disable=["parser", "ner"])
tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != "-PRON-"]
tokens = [
tok for tok in tokens if tok not in stopwords and tok not in punctuations
]
tokens = " ".join(tokens)
texts.append(tokens)
return pd.Series(texts)
# **Plot Word Frequency By Each Author**
train[train["Author"] == "EAP"]["Text"]
eap_text = [text for text in train[train["Author"] == "EAP"]["Text"]]
hpl_text = [text for text in train[train["Author"] == "HPL"]["Text"]]
mws_text = [text for text in train[train["Author"] == "MWS"]["Text"]]
# clean up eap
eap_clean = cleanup_text(eap_text)
eap_clean = " ".join(eap_clean).split()
eap_claen = [word for word in eap_clean if word != "\s"]
# clean up hpl
hpl_clean = cleanup_text(hpl_text)
hpl_clean = " ".join(hpl_clean).split()
# 's appears a lot in the text, so we get rid of it since it's not a word
hpl_clean = [word for word in hpl_clean if word != "'s"]
# clean up mws
mws_clean = cleanup_text(mws_text)
mws_clean = " ".join(mws_clean).split()
# 's appears a lot in the text, so we get rid of it since it's not a word
mws_clean = [word for word in mws_clean if word != "'s"]
eap_counts = Counter(eap_clean)
hpl_counts = Counter(hpl_clean)
mws_counts = Counter(mws_clean)
# **EAP Common Words**
eap_common_words = [word[0] for word in eap_counts.most_common(25)]
eap_common_counts = [word[1] for word in eap_counts.most_common(25)]
plt.style.use("dark_background")
plt.figure(figsize=(15, 12))
sns.barplot(x=eap_common_words, y=eap_common_counts)
plt.title("Most Common Words used by Edgar Allen Poe")
plt.show()
# **HPL Common Words**
hpl_common_words = [word[0] for word in hpl_counts.most_common(25)]
hpl_common_counts = [word[1] for word in hpl_counts.most_common(25)]
plt.figure(figsize=(15, 12))
sns.barplot(x=hpl_common_words, y=hpl_common_counts)
plt.title("Most Common Words used by H.P. Lovecraft")
plt.show()
# **MWS Common Words**
mws_common_words = [word[0] for word in mws_counts.most_common(25)]
mws_common_counts = [word[1] for word in mws_counts.most_common(25)]
plt.figure(figsize=(15, 12))
sns.barplot(x=mws_common_words, y=mws_common_counts)
plt.title("Most Common Words used by Mary Wollstonecraft Shelley")
plt.show()
print(eap_counts.most_common(25))
print()
print(hpl_counts.most_common(25))
print()
print(mws_counts.most_common(25))
print("Original training data shape: ", train["Text"].shape)
train_cleaned = cleanup_text(train["Text"], logging=True)
print("Cleaned up training data shape: ", train_cleaned.shape)
print("Parsing documents...")
start = time()
train_vec = []
for doc in nlp.pipe(train_cleaned, batch_size=500):
if doc.has_vector:
train_vec.append(doc.vector)
else:
train_vec.append(np.zeros((128,), dtype="float32"))
train_vec = np.array(train_vec)
end = time()
print("Total time passed parsing documents: {} seconds".format(end - start))
print("Total number of documents parsed: {}".format(len(train_vec)))
print("Number of words in first document: ", len(train["Text"][0]))
print("Number of words in second document: ", len(train["Text"][1]))
# print('Size of vector embeddings: ', train_vec.shape[1])
print("Shape of vectors embeddings matrix: ", train_vec.shape)
# **Word2vec**
all_text = np.concatenate((train["Text"], test["Text"]), axis=0)
all_text = pd.DataFrame(all_text, columns=["Text"])
print("Number of total text documents:", len(all_text))
def cleanup_text_word2vec(docs, logging=False):
sentences = []
counter = 1
for doc in docs:
if counter % 1000 == 0 and logging:
print("Processed %d out of %d documents" % (counter, len(docs)))
doc = nlp(doc, disable=["tagger"])
doc = " ".join([tok.lemma_.lower() for tok in doc])
# Split into sentences based on punctuation
doc = re.split("[\.?!;] ", doc)
# Remove commas, periods, and other punctuation (mostly commas)
doc = [re.sub("[\.,;:!?]", "", sent) for sent in doc]
# Split into words
doc = [sent.split() for sent in doc]
sentences += doc
counter += 1
return sentences
train_cleaned_word2vec = cleanup_text_word2vec(all_text["Text"], logging=True)
print(
"Cleaned up training data size (i.e. number of sentences): ",
len(train_cleaned_word2vec),
)
from gensim.models.word2vec import Word2Vec
text_dim = 300
print("Training Word2Vec model...")
wordvec_model = Word2Vec(
train_cleaned_word2vec, vector_size=text_dim, window=5, min_count=3, workers=4, sg=1
)
print("Word2Vec model created.")
print(
"%d unique words represented by %d dimensional vectors"
% (len(wordvec_model.wv.key_to_index), text_dim)
)
print(wordvec_model.wv.most_similar(positive=["woman", "king"], negative=["man"]))
print(
wordvec_model.wv.most_similar_cosmul(positive=["woman", "king"], negative=["man"])
)
print(wordvec_model.wv.doesnt_match("breakfast cereal dinner lunch".split()))
print(wordvec_model.wv.similarity("woman", "man"))
print(wordvec_model.wv.similarity("widow", "mother"))
def create_average_vec(doc):
average = np.zeros((text_dim,), dtype="float32")
num_words = 0.0
for word in doc.split():
if word in wordvec_model.wv.key_to_index:
average = np.add(average, wordvec_model.wv[word])
num_words += 1.0
if num_words != 0.0:
average = np.divide(average, num_words)
return average
count = 0
for i in range(len(train_cleaned)):
if train_cleaned[i] == "":
print("index:", i)
count += 1
print(count)
# **Word Vectors**
train_cleaned_vec = np.zeros((train.shape[0], text_dim), dtype="float32") # 19579 x 300
for i in range(len(train_cleaned)):
train_cleaned_vec[i] = create_average_vec(train_cleaned[i])
print("Train word vector shape:", train_cleaned_vec.shape)
# **One-hot Encode Labels**
from sklearn.preprocessing import label_binarize
# transform labels into the hot encoded
y_train_ohe = label_binarize(train["Author"], classes=["EAP", "HPL", "MWS"])
print("y_train_ohe shape: {}".format(y_train_ohe.shape))
print("y_train_ohe samples:")
print(y_train_ohe[:5])
# **Split into Train/Test Datasets**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
train_cleaned_vec, y_train_ohe, test_size=0.2, random_state=21
)
print("X_train size: {}".format(X_train.shape))
print("X_test size: {}".format(X_test.shape))
print("y_train size: {}".format(y_train.shape))
print("y_test size: {}".format(y_test.shape))
# **Keras**
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, LSTM, Embedding, Bidirectional, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D
from keras.optimizers import SGD
def build_model(architecture="mlp"):
model = Sequential()
if architecture == "mlp":
model.add(
Dense(512, activation="relu", kernel_initializer="he_normal", input_dim=300)
)
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(3, activation="softmax"))
elif architecture == "cnn":
inputs = Input(shape=(300, 1))
x = Conv1D(64, 3, strides=1, padding="same", activation="relu")(inputs)
x = MaxPooling1D(pool_size=2)(x)
outputs = Dense(3, activation="softmax")(x)
modeel = Model(inputs=inputs, outputs=outputs, name="CNN")
elif architecture == "lstm":
inputs = Input(shape=(300, 1))
x = Bidirectional(LSTM(64, return_sequences=True), merge_mode="concat")(inputs)
x = Dropout(0.2)(x)
x = Flatten(x)
outputs = Dense(3, activation="softmax")(x)
model = Model(inputs=inputs, outputs=outputs, name="LSTM")
else:
print("Error: Model type not found.")
return model
model = build_model("mlp")
if model.name == "CNN" or model.name == "LSTM":
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print("Text train shape: ", X_test.shape)
print("Text test shape: ", X_test.shape)
model.summary()
# **SGD**
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=["acc"])
epochs = 30
estimator = model.fit(
X_train, y_train, validation_split=0.2, epochs=epochs, batch_size=128, verbose=1
)
print(
"Training accuracy: %.2f%% / Validation accuracy: %.2f%%"
% (100 * estimator.history["acc"][-1], 100 * estimator.history["val_acc"][-1])
)
sns.reset_orig()
plt.plot(estimator.history["acc"])
plt.plot(estimator.history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# Plot model loss over epochs
plt.plot(estimator.history["loss"])
plt.plot(estimator.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# **Prediction and Submission**
predicted_prob = model.predict(X_test)
print(predicted_prob.shape)
with open("submission.csv", "w") as file_obj:
file_obj.write("ID,EAP,HPL,MWS\n")
for pred in range(len(predicted_prob)):
file_obj.write(
str(pred + 1)
+ ","
+ ",".join("{:.2f}".format(s) for s in predicted_prob[pred].tolist())
+ "\n"
)
loss_sk = log_loss(y_test, predicted_prob)
print("Log loss is: {}".format(loss_sk))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650266.ipynb
| null | null |
[{"Id": 129650266, "ScriptId": 38524167, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9917475, "CreationDate": "05/15/2023 13:34:55", "VersionNumber": 3.0, "Title": "Spooky Author Identification-WordCloud-Word2vec", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 448.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 448.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from IPython.display import display
import base64
import string
import re
from collections import Counter
from time import time
# from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stopwords
from nltk.corpus import stopwords
from sklearn.metrics import log_loss
stopwords = stopwords.words("english")
sns.set_context("notebook")
print("Numpy version:", np.__version__)
print("Pandas version:", pd.__version__)
print("Seaborn version:", sns.__version__)
# **Data Visualization**
train = pd.read_csv("/kaggle/working/train.csv")
train.head()
train.index
train = pd.read_csv(
"/kaggle/working/train.csv", skiprows=1, names=["ID", "Text", "Author"]
).set_index("ID")
test = pd.read_csv(
"/kaggle/working/test.csv", skiprows=1, names=["ID", "Text"]
).set_index("ID")
# Delete the word 'id' from the ID columns
train.index = [id[2:] for id in train.index]
test.index = [id[2:] for id in test.index]
train.sort_index(inplace=True)
test.sort_index(inplace=True)
train["Text"][0]
display(train.head())
display(test.head())
print("Training sample:", train["Text"][0])
print("Author of sample:", train["Author"][0])
print("Training Data Shape:", train.shape)
print("Testing Data Shape:", test.shape)
print("Training Dataset Info:")
display(train.info())
print("Test Dataset Info:")
display(test.info())
# **Plot**
sns.barplot(
x=["Edgar Allen Poe", "Mary Wollstonecraft Shelley", "H.P. Lovecraft"],
y=train["Author"].value_counts(),
)
plt.show()
# **Create Spooky Wordcloud**
font_64_decode = base64.decodebytes(font_64_encode)
font_result = open(
"spooky_font.ttf", "wb"
) # create a writable font file and write the decoding result
font_result.write(font_64_decode)
font_result.close()
all_text = " ".join([text for text in train["Text"]])
print("Number of words in all_text:", len(all_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(all_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for Edgar Allen Poe**
[train["Author"] == "EAP"]
eap = train[train["Author"] == "EAP"]
eap_text = " ".join(text for text in eap["Text"])
print("Number of words in eap_text:", len(eap_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(eap_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for Mary Wollstonecraft Shelley**
train[train["Author"] == "MWS"]
mws = train[train["Author"] == "MWS"]
mws_text = " ".join(text for text in mws["Text"])
print("Number of words in eap_text:", len(mws_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(mws_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Wordcloud for H.P. Lovecraft**
train[train["Author"] == "HPL"]
hpl = train[train["Author"] == "HPL"]
hpl_text = " ".join(text for text in hpl["Text"])
print("Number of words in eap_text:", len(hpl_text))
wordcloud = WordCloud(
font_path="spooky_font.ttf",
width=800,
height=500,
random_state=21,
max_font_size=110,
).generate(hpl_text)
plt.figure(figsize=(15, 12))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# **Text Preprocessing**
import spacy
nlp = spacy.load("en_core_web_sm")
punctuations = string.punctuation
def cleanup_text(docs, logging=False):
texts = []
counter = 1
for doc in docs:
if counter % 1000 == 0 and logging:
print("Processed %d out of %d documents." % (counter, len(docs)))
counter += 1
doc = nlp(doc, disable=["parser", "ner"])
tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != "-PRON-"]
tokens = [
tok for tok in tokens if tok not in stopwords and tok not in punctuations
]
tokens = " ".join(tokens)
texts.append(tokens)
return pd.Series(texts)
# **Plot Word Frequency By Each Author**
train[train["Author"] == "EAP"]["Text"]
eap_text = [text for text in train[train["Author"] == "EAP"]["Text"]]
hpl_text = [text for text in train[train["Author"] == "HPL"]["Text"]]
mws_text = [text for text in train[train["Author"] == "MWS"]["Text"]]
# clean up eap
eap_clean = cleanup_text(eap_text)
eap_clean = " ".join(eap_clean).split()
eap_claen = [word for word in eap_clean if word != "\s"]
# clean up hpl
hpl_clean = cleanup_text(hpl_text)
hpl_clean = " ".join(hpl_clean).split()
# 's appears a lot in the text, so we get rid of it since it's not a word
hpl_clean = [word for word in hpl_clean if word != "'s"]
# clean up mws
mws_clean = cleanup_text(mws_text)
mws_clean = " ".join(mws_clean).split()
# 's appears a lot in the text, so we get rid of it since it's not a word
mws_clean = [word for word in mws_clean if word != "'s"]
eap_counts = Counter(eap_clean)
hpl_counts = Counter(hpl_clean)
mws_counts = Counter(mws_clean)
# **EAP Common Words**
eap_common_words = [word[0] for word in eap_counts.most_common(25)]
eap_common_counts = [word[1] for word in eap_counts.most_common(25)]
plt.style.use("dark_background")
plt.figure(figsize=(15, 12))
sns.barplot(x=eap_common_words, y=eap_common_counts)
plt.title("Most Common Words used by Edgar Allen Poe")
plt.show()
# **HPL Common Words**
hpl_common_words = [word[0] for word in hpl_counts.most_common(25)]
hpl_common_counts = [word[1] for word in hpl_counts.most_common(25)]
plt.figure(figsize=(15, 12))
sns.barplot(x=hpl_common_words, y=hpl_common_counts)
plt.title("Most Common Words used by H.P. Lovecraft")
plt.show()
# **MWS Common Words**
mws_common_words = [word[0] for word in mws_counts.most_common(25)]
mws_common_counts = [word[1] for word in mws_counts.most_common(25)]
plt.figure(figsize=(15, 12))
sns.barplot(x=mws_common_words, y=mws_common_counts)
plt.title("Most Common Words used by Mary Wollstonecraft Shelley")
plt.show()
print(eap_counts.most_common(25))
print()
print(hpl_counts.most_common(25))
print()
print(mws_counts.most_common(25))
print("Original training data shape: ", train["Text"].shape)
train_cleaned = cleanup_text(train["Text"], logging=True)
print("Cleaned up training data shape: ", train_cleaned.shape)
print("Parsing documents...")
start = time()
train_vec = []
for doc in nlp.pipe(train_cleaned, batch_size=500):
if doc.has_vector:
train_vec.append(doc.vector)
else:
train_vec.append(np.zeros((128,), dtype="float32"))
train_vec = np.array(train_vec)
end = time()
print("Total time passed parsing documents: {} seconds".format(end - start))
print("Total number of documents parsed: {}".format(len(train_vec)))
print("Number of words in first document: ", len(train["Text"][0]))
print("Number of words in second document: ", len(train["Text"][1]))
# print('Size of vector embeddings: ', train_vec.shape[1])
print("Shape of vectors embeddings matrix: ", train_vec.shape)
# **Word2vec**
all_text = np.concatenate((train["Text"], test["Text"]), axis=0)
all_text = pd.DataFrame(all_text, columns=["Text"])
print("Number of total text documents:", len(all_text))
def cleanup_text_word2vec(docs, logging=False):
sentences = []
counter = 1
for doc in docs:
if counter % 1000 == 0 and logging:
print("Processed %d out of %d documents" % (counter, len(docs)))
doc = nlp(doc, disable=["tagger"])
doc = " ".join([tok.lemma_.lower() for tok in doc])
# Split into sentences based on punctuation
doc = re.split("[\.?!;] ", doc)
# Remove commas, periods, and other punctuation (mostly commas)
doc = [re.sub("[\.,;:!?]", "", sent) for sent in doc]
# Split into words
doc = [sent.split() for sent in doc]
sentences += doc
counter += 1
return sentences
train_cleaned_word2vec = cleanup_text_word2vec(all_text["Text"], logging=True)
print(
"Cleaned up training data size (i.e. number of sentences): ",
len(train_cleaned_word2vec),
)
from gensim.models.word2vec import Word2Vec
text_dim = 300
print("Training Word2Vec model...")
wordvec_model = Word2Vec(
train_cleaned_word2vec, vector_size=text_dim, window=5, min_count=3, workers=4, sg=1
)
print("Word2Vec model created.")
print(
"%d unique words represented by %d dimensional vectors"
% (len(wordvec_model.wv.key_to_index), text_dim)
)
print(wordvec_model.wv.most_similar(positive=["woman", "king"], negative=["man"]))
print(
wordvec_model.wv.most_similar_cosmul(positive=["woman", "king"], negative=["man"])
)
print(wordvec_model.wv.doesnt_match("breakfast cereal dinner lunch".split()))
print(wordvec_model.wv.similarity("woman", "man"))
print(wordvec_model.wv.similarity("widow", "mother"))
def create_average_vec(doc):
average = np.zeros((text_dim,), dtype="float32")
num_words = 0.0
for word in doc.split():
if word in wordvec_model.wv.key_to_index:
average = np.add(average, wordvec_model.wv[word])
num_words += 1.0
if num_words != 0.0:
average = np.divide(average, num_words)
return average
count = 0
for i in range(len(train_cleaned)):
if train_cleaned[i] == "":
print("index:", i)
count += 1
print(count)
# **Word Vectors**
train_cleaned_vec = np.zeros((train.shape[0], text_dim), dtype="float32") # 19579 x 300
for i in range(len(train_cleaned)):
train_cleaned_vec[i] = create_average_vec(train_cleaned[i])
print("Train word vector shape:", train_cleaned_vec.shape)
# **One-hot Encode Labels**
from sklearn.preprocessing import label_binarize
# transform labels into the hot encoded
y_train_ohe = label_binarize(train["Author"], classes=["EAP", "HPL", "MWS"])
print("y_train_ohe shape: {}".format(y_train_ohe.shape))
print("y_train_ohe samples:")
print(y_train_ohe[:5])
# **Split into Train/Test Datasets**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
train_cleaned_vec, y_train_ohe, test_size=0.2, random_state=21
)
print("X_train size: {}".format(X_train.shape))
print("X_test size: {}".format(X_test.shape))
print("y_train size: {}".format(y_train.shape))
print("y_test size: {}".format(y_test.shape))
# **Keras**
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, LSTM, Embedding, Bidirectional, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D
from keras.optimizers import SGD
def build_model(architecture="mlp"):
model = Sequential()
if architecture == "mlp":
model.add(
Dense(512, activation="relu", kernel_initializer="he_normal", input_dim=300)
)
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(512, activation="relu", kernel_initializer="he_normal"))
model.add(Dropout(0.2))
model.add(Dense(3, activation="softmax"))
elif architecture == "cnn":
inputs = Input(shape=(300, 1))
x = Conv1D(64, 3, strides=1, padding="same", activation="relu")(inputs)
x = MaxPooling1D(pool_size=2)(x)
outputs = Dense(3, activation="softmax")(x)
modeel = Model(inputs=inputs, outputs=outputs, name="CNN")
elif architecture == "lstm":
inputs = Input(shape=(300, 1))
x = Bidirectional(LSTM(64, return_sequences=True), merge_mode="concat")(inputs)
x = Dropout(0.2)(x)
x = Flatten(x)
outputs = Dense(3, activation="softmax")(x)
model = Model(inputs=inputs, outputs=outputs, name="LSTM")
else:
print("Error: Model type not found.")
return model
model = build_model("mlp")
if model.name == "CNN" or model.name == "LSTM":
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print("Text train shape: ", X_test.shape)
print("Text test shape: ", X_test.shape)
model.summary()
# **SGD**
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=["acc"])
epochs = 30
estimator = model.fit(
X_train, y_train, validation_split=0.2, epochs=epochs, batch_size=128, verbose=1
)
print(
"Training accuracy: %.2f%% / Validation accuracy: %.2f%%"
% (100 * estimator.history["acc"][-1], 100 * estimator.history["val_acc"][-1])
)
sns.reset_orig()
plt.plot(estimator.history["acc"])
plt.plot(estimator.history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# Plot model loss over epochs
plt.plot(estimator.history["loss"])
plt.plot(estimator.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# **Prediction and Submission**
predicted_prob = model.predict(X_test)
print(predicted_prob.shape)
with open("submission.csv", "w") as file_obj:
file_obj.write("ID,EAP,HPL,MWS\n")
for pred in range(len(predicted_prob)):
file_obj.write(
str(pred + 1)
+ ","
+ ",".join("{:.2f}".format(s) for s in predicted_prob[pred].tolist())
+ "\n"
)
loss_sk = log_loss(y_test, predicted_prob)
print("Log loss is: {}".format(loss_sk))
| false | 0 | 4,646 | 0 | 4,646 | 4,646 |
||
129650674
|
<jupyter_start><jupyter_text>Skin Cancer Dataset
A training set for academic machine learning can be created using the dataset, which comprises of 10015 dermatoscopic images. All significant diagnostic categories for pigmented lesions are represented in the cases in a representative manner:
- Actinic keratoses and intraepithelial carcinoma / Bowen's disease (`akiec`),
- basal cell carcinoma (`bcc`),
- benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, `bkl`),
- dermatofibroma (`df`),
- melanoma (`mel`),
- melanocytic nevi (`nv`) and
- vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, `vasc`).
Histopathology (histo) is used to confirm more than 50% of lesions; in the remaining cases, follow-up exams, expert consensus, or in-vivo confocal microscopy confirmation are used as the gold standard (confocal).
Dataset Collected from:
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T
Kaggle dataset identifier: skin-cancer-dataset
<jupyter_script>import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
# Load the metadata file
metadata = pd.read_csv("/kaggle/input/skin-cancer-dataset/HAM10000_metadata.csv")
# Preprocess the data
image_size = 32
X = []
y = []
for index, row in metadata.iterrows():
if index % 1000 == 0:
print(f"Processing image {index}")
img_id = row["image_id"] + ".jpg"
img_path1 = os.path.join(
"/kaggle/input/skin-cancer-dataset/Skin Cancer/Skin Cancer", img_id
)
if os.path.exists(img_path1):
img_path = img_path1
else:
print(f"Image file does not exist: {img_id}")
continue
img = cv2.imread(img_path)
if img is None:
print(f"Error loading image: {img_path}")
continue
img = cv2.resize(img, (image_size, image_size))
X.append(img)
y.append(row["dx"])
X = np.array(X)
X = X / 255.0
y = np.array(y)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Convert labels from string to numerical categories
label_dict = {"akiec": 0, "bcc": 1, "bkl": 2, "df": 3, "mel": 4, "nv": 5, "vasc": 6}
y_train = [label_dict[label] for label in y_train]
y_test = [label_dict[label] for label in y_test]
# Convert the labels to one-hot encoded vectors
y_train = to_categorical(y_train, num_classes=7)
y_test = to_categorical(y_test, num_classes=7)
datagen = ImageDataGenerator(
rotation_range=90, horizontal_flip=True, vertical_flip=True
)
# Generate augmented images from the training data
augmented_data = datagen.flow(X_train, y_train, batch_size=32)
def resnet50(input_shape, num_classes):
def residual_block(x, filters, downsample=False):
strides = (2, 2) if downsample else (1, 1)
shortcut = x
x = Conv2D(filters, (1, 1), strides=strides, padding="valid")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters * 4, (1, 1))(x)
x = BatchNormalization()(x)
if downsample:
shortcut = Conv2D(filters * 4, (1, 1), strides=(2, 2), padding="valid")(
shortcut
)
shortcut = BatchNormalization()(shortcut)
x = Add()([x, shortcut])
x = Activation("relu")(x)
return x
inputs = Input(shape=input_shape)
x = Conv2D(64, (7, 7), strides=(2, 2), padding="same")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
x = residual_block(x, 64)
x = residual_block(x, 64)
x = residual_block(x, 64)
x = residual_block(x, 128, downsample=True)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = residual_block(x, 256, downsample=True)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 512, downsample=True)
x = residual_block(x, 512)
x = residual_block(x, 512)
x = GlobalAveragePooling2D()(x)
x = Dense(num_classes, activation="softmax")(x)
model = Model(inputs=inputs, outputs=x)
return model
# Define the input shape and number of classes
input_shape = (224, 224, 3)
num_classes = 7
# Create the ResNet50 model
model = resnet50(input_shape, num_classes)
# Compile the model
opt = tf.keras.optimizers.Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model using the augmented data
history = model.fit(
augmented_data,
steps_per_epoch=len(X_train) // 32,
validation_data=(X_test, y_test),
epochs=21,
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650674.ipynb
|
skin-cancer-dataset
|
farjanakabirsamanta
|
[{"Id": 129650674, "ScriptId": 38553792, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10152574, "CreationDate": "05/15/2023 13:37:45", "VersionNumber": 1.0, "Title": "notebook295418df5b", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 133.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185939950, "KernelVersionId": 129650674, "SourceDatasetVersionId": 4431730}]
|
[{"Id": 4431730, "DatasetId": 2595427, "DatasourceVersionId": 4491177, "CreatorUserId": 11779392, "LicenseName": "Other (specified in description)", "CreationDate": "11/01/2022 09:04:58", "VersionNumber": 1.0, "Title": "Skin Cancer Dataset", "Slug": "skin-cancer-dataset", "Subtitle": "7 types of Skin Cancer", "Description": "A training set for academic machine learning can be created using the dataset, which comprises of 10015 dermatoscopic images. All significant diagnostic categories for pigmented lesions are represented in the cases in a representative manner: \n- Actinic keratoses and intraepithelial carcinoma / Bowen's disease (`akiec`), \n- basal cell carcinoma (`bcc`), \n- benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, `bkl`), \n- dermatofibroma (`df`), \n- melanoma (`mel`), \n- melanocytic nevi (`nv`) and \n- vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, `vasc`).\n\nHistopathology (histo) is used to confirm more than 50% of lesions; in the remaining cases, follow-up exams, expert consensus, or in-vivo confocal microscopy confirmation are used as the gold standard (confocal). \n\nDataset Collected from:\nhttps://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2595427, "CreatorUserId": 11779392, "OwnerUserId": 11779392.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4431730.0, "CurrentDatasourceVersionId": 4491177.0, "ForumId": 2625653, "Type": 2, "CreationDate": "11/01/2022 09:04:58", "LastActivityDate": "11/01/2022", "TotalViews": 27735, "TotalDownloads": 2906, "TotalVotes": 71, "TotalKernels": 11}]
|
[{"Id": 11779392, "UserName": "farjanakabirsamanta", "DisplayName": "Farjana Kabir", "RegisterDate": "10/01/2022", "PerformanceTier": 2}]
|
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
# Load the metadata file
metadata = pd.read_csv("/kaggle/input/skin-cancer-dataset/HAM10000_metadata.csv")
# Preprocess the data
image_size = 32
X = []
y = []
for index, row in metadata.iterrows():
if index % 1000 == 0:
print(f"Processing image {index}")
img_id = row["image_id"] + ".jpg"
img_path1 = os.path.join(
"/kaggle/input/skin-cancer-dataset/Skin Cancer/Skin Cancer", img_id
)
if os.path.exists(img_path1):
img_path = img_path1
else:
print(f"Image file does not exist: {img_id}")
continue
img = cv2.imread(img_path)
if img is None:
print(f"Error loading image: {img_path}")
continue
img = cv2.resize(img, (image_size, image_size))
X.append(img)
y.append(row["dx"])
X = np.array(X)
X = X / 255.0
y = np.array(y)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Convert labels from string to numerical categories
label_dict = {"akiec": 0, "bcc": 1, "bkl": 2, "df": 3, "mel": 4, "nv": 5, "vasc": 6}
y_train = [label_dict[label] for label in y_train]
y_test = [label_dict[label] for label in y_test]
# Convert the labels to one-hot encoded vectors
y_train = to_categorical(y_train, num_classes=7)
y_test = to_categorical(y_test, num_classes=7)
datagen = ImageDataGenerator(
rotation_range=90, horizontal_flip=True, vertical_flip=True
)
# Generate augmented images from the training data
augmented_data = datagen.flow(X_train, y_train, batch_size=32)
def resnet50(input_shape, num_classes):
def residual_block(x, filters, downsample=False):
strides = (2, 2) if downsample else (1, 1)
shortcut = x
x = Conv2D(filters, (1, 1), strides=strides, padding="valid")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(filters * 4, (1, 1))(x)
x = BatchNormalization()(x)
if downsample:
shortcut = Conv2D(filters * 4, (1, 1), strides=(2, 2), padding="valid")(
shortcut
)
shortcut = BatchNormalization()(shortcut)
x = Add()([x, shortcut])
x = Activation("relu")(x)
return x
inputs = Input(shape=input_shape)
x = Conv2D(64, (7, 7), strides=(2, 2), padding="same")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
x = residual_block(x, 64)
x = residual_block(x, 64)
x = residual_block(x, 64)
x = residual_block(x, 128, downsample=True)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = residual_block(x, 256, downsample=True)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 256)
x = residual_block(x, 512, downsample=True)
x = residual_block(x, 512)
x = residual_block(x, 512)
x = GlobalAveragePooling2D()(x)
x = Dense(num_classes, activation="softmax")(x)
model = Model(inputs=inputs, outputs=x)
return model
# Define the input shape and number of classes
input_shape = (224, 224, 3)
num_classes = 7
# Create the ResNet50 model
model = resnet50(input_shape, num_classes)
# Compile the model
opt = tf.keras.optimizers.Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model using the augmented data
history = model.fit(
augmented_data,
steps_per_epoch=len(X_train) // 32,
validation_data=(X_test, y_test),
epochs=21,
)
| false | 1 | 1,407 | 0 | 1,744 | 1,407 |
||
129627997
|
import pandas as pd
import numpy as np
from selenium import webdriver
import re
import plotly.graph_objs as go
import plotly.express as px
pd.options.mode.chained_assignment = None
data_one = pd.read_csv("H-1B_2014.csv")
data_two = pd.read_csv("H-1B_2015.csv")
data_three = pd.read_csv("H-1B_2016.csv")
columns = [
"LCA_CASE_NUMBER",
"STATUS",
"LCA_CASE_SUBMIT",
"DECISION_DATE",
"VISA_CLASS",
"LCA_CASE_EMPLOYMENT_START_DATE",
"LCA_CASE_EMPLOYMENT_END_DATE",
"LCA_CASE_EMPLOYER_NAME",
"LCA_CASE_EMPLOYER_STATE",
"LCA_CASE_EMPLOYER_CITY",
"LCA_CASE_SOC_CODE",
"LCA_CASE_SOC_NAME",
"LCA_CASE_JOB_TITLE",
"LCA_CASE_WAGE_RATE_FROM",
"LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POS",
"LCA_CASE_NAICS_CODE",
]
columns = [
"LCA_CASE_NUMBER",
"STATUS",
"LCA_CASE_SUBMIT",
"DECISION_DATE",
"VISA_CLASS",
"LCA_CASE_EMPLOYMENT_START_DATE",
"LCA_CASE_EMPLOYMENT_END_DATE",
"LCA_CASE_EMPLOYER_NAME",
"LCA_CASE_EMPLOYER_STATE",
"LCA_CASE_EMPLOYER_CITY",
"LCA_CASE_SOC_CODE",
"LCA_CASE_SOC_NAME",
"LCA_CASE_JOB_TITLE",
"LCA_CASE_WAGE_RATE_FROM",
"LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POS",
"LCA_CASE_NAICS_CODE",
]
# renaming the columns
data_two = data_two.rename(
columns={
"CASE_NUMBER": "LCA_CASE_NUMBER",
"CASE_STATUS": "STATUS",
"CASE_SUBMITTED": "LCA_CASE_SUBMIT",
"DECISION_DATE": "DECISION_DATE",
"EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE",
"EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE",
"EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME",
"EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE",
"EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY",
"SOC_CODE": "LCA_CASE_SOC_CODE",
"SOC_NAME": "LCA_CASE_SOC_NAME",
"JOB_TITLE": "LCA_CASE_JOB_TITLE",
"WAGE_RATE_OF_PAY": "LCA_CASE_WAGE_RATE_FROM",
"WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POSITION": "FULL_TIME_POS",
"NAIC_CODE": "LCA_CASE_NAICS_CODE",
}
)
columns = [
"LCA_CASE_NUMBER",
"STATUS",
"LCA_CASE_SUBMIT",
"DECISION_DATE",
"VISA_CLASS",
"LCA_CASE_EMPLOYMENT_START_DATE",
"LCA_CASE_EMPLOYMENT_END_DATE",
"LCA_CASE_EMPLOYER_NAME",
"LCA_CASE_EMPLOYER_STATE",
"LCA_CASE_EMPLOYER_CITY",
"LCA_CASE_SOC_CODE",
"LCA_CASE_SOC_NAME",
"LCA_CASE_JOB_TITLE",
"LCA_CASE_WAGE_RATE_FROM",
"LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POS",
"LCA_CASE_NAICS_CODE",
]
# renaming the columns
data_three = data_three.rename(
columns={
"CASE_NUMBER": "LCA_CASE_NUMBER",
"CASE_STATUS": "STATUS",
"CASE_SUBMITTED": "LCA_CASE_SUBMIT",
"DECISION_DATE": "DECISION_DATE",
"EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE",
"EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE",
"EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME",
"EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE",
"EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY",
"SOC_CODE": "LCA_CASE_SOC_CODE",
"SOC_NAME": "LCA_CASE_SOC_NAME",
"JOB_TITLE": "LCA_CASE_JOB_TITLE",
"WAGE_RATE_OF_PAY_FROM": "LCA_CASE_WAGE_RATE_FROM",
"WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POSITION": "FULL_TIME_POS",
"NAIC_CODE": "LCA_CASE_NAICS_CODE",
}
)
# concat the three dataframes
final_df = pd.concat(
[data_one[columns], data_two[columns], data_three[columns]]
).reset_index(drop=True)
final_df.head()
final_df.columns
final_df.info()
(final_df.isnull().sum())
final_df.head()
# ## Feature engineering
# ### creating employment period column
# convert date columns to datetime format and assign nan to invalid data
final_df["LCA_CASE_EMPLOYMENT_START_DATE"] = pd.to_datetime(
final_df["LCA_CASE_EMPLOYMENT_START_DATE"], errors="coerce"
)
final_df["LCA_CASE_EMPLOYMENT_END_DATE"] = pd.to_datetime(
final_df["LCA_CASE_EMPLOYMENT_END_DATE"], errors="coerce"
)
# subtract LCA_CASE_EMPLOYMENT_START_DATE from LCA_CASE_EMPLOYMENT_END_DATE to find the employment period
LCA_CASE_EMPLOYMENT_PERIOD = (
final_df["LCA_CASE_EMPLOYMENT_END_DATE"]
- final_df["LCA_CASE_EMPLOYMENT_START_DATE"]
)
# create a new column with LCA_CASE_EMPLOYMENT_PERIOD value
final_df.insert(7, "LCA_CASE_EMPLOYMENT_PERIOD", LCA_CASE_EMPLOYMENT_PERIOD)
final_df.head()
# converting LCA_CASE_EMPLOYMENT_PERIOD into days format
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].dt.days
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# drop outlier rows, i.e. employment periods of zero days or less
final_df = final_df[final_df["LCA_CASE_EMPLOYMENT_PERIOD"] > 0]
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# - The value of 30.44 is derived by calculating the average number of days in a month over a period of four years.
# - The average number of days in a year is 365.24 (due to leap years), so dividing by 12 gives an average of 30.44 days per month. This is a commonly used approximation in calculations involving months and days.
# - Using 30.44 as the number of days in a month provides a more accurate estimate when converting between months and days, rather than assuming that each month has exactly 30 or 31 days.
#
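# Quick arithmetic check of the 30.44 days-per-month figure described above (illustration only).
print(round(365.24 / 12, 2))  # -> 30.44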
# the employment period is converted into months
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = round(
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] / 30.44
)
# fill missing values with 0 and convert the column type to int
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = (
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].fillna(0).astype(int)
)
# ### creating visa decision duration column
# convert date columns to datetime format and assign nan to invalid data
final_df["LCA_CASE_SUBMIT"] = pd.to_datetime(
final_df["LCA_CASE_SUBMIT"], errors="coerce"
)
final_df["DECISION_DATE"] = pd.to_datetime(final_df["DECISION_DATE"], errors="coerce")
# subtract the LCA_CASE_SUBMIT from DECISION_DATE to find visa decision period
LCA_CASE_DECISION_PERIOD = final_df["DECISION_DATE"] - final_df["LCA_CASE_SUBMIT"]
# create a new column with LCA_CASE_DECISION_PERIOD value
final_df.insert(4, "LCA_CASE_DECISION_PERIOD", LCA_CASE_DECISION_PERIOD)
final_df["LCA_CASE_DECISION_PERIOD"] = final_df["LCA_CASE_DECISION_PERIOD"].dt.days
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# remove all non-alphanumeric characters (including spaces) from LCA_CASE_EMPLOYER_CITY
final_df["LCA_CASE_EMPLOYER_CITY"] = final_df["LCA_CASE_EMPLOYER_CITY"].replace(
{"[^a-zA-Z0-9]": ""}, regex=True
)
# ### find the sectors of the company using the NAICS code
# Convert the LCA_CASE_NAICS_CODE column to string data type
final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].astype(str)
# Extract the first two digits of each string value
final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].str[:2]
naics_unique_values = final_df["LCA_CASE_NAICS_CODE"].unique()
# read the NAICS reference data to cross-check codes and create a new employer sector column
NAICS_data = pd.read_csv("NAICS_data.csv")
NAICS_data.head()
# loop through all the NAICS in the naics_unique_values
for i in naics_unique_values:
try:
# assuming your dataframe is called 'df'
NAICS_data_code = NAICS_data.loc[
NAICS_data["NAICS_CODE"] == i, "NAICS_TITLE"
].iloc[0]
except:
        # if the NAICS code is not found in NAICS_data, label the employer sector as Unknown
NAICS_data_code = "Unknown"
# create a boolean mask for the conditions
mask = final_df["LCA_CASE_NAICS_CODE"] == i
    # update the EMPLOYER_SECTOR column for the filtered rows
final_df.loc[mask, "EMPLOYER_SECTOR"] = NAICS_data_code
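# A hedged alternative sketch of the same lookup using a dictionary map instead of a loop.
# It assumes NAICS_CODE can be read as a two-digit string key; the comparison below only
# reports how often the two approaches agree and does not modify final_df.
_naics_lookup = dict(
    zip(NAICS_data["NAICS_CODE"].astype(str).str[:2], NAICS_data["NAICS_TITLE"])
)
_sector_check = final_df["LCA_CASE_NAICS_CODE"].map(_naics_lookup).fillna("Unknown")
print("agreement with loop result:", (_sector_check == final_df["EMPLOYER_SECTOR"]).mean())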
# extract the year component from the datetime column LCA_CASE_SUBMIT and store it in a new column year
final_df["year"] = final_df["LCA_CASE_SUBMIT"].dt.year
# # Preprocessing
# drop duplicates
final_df = final_df.drop_duplicates()
# remove the decimal part after "." in the 'LCA_CASE_SOC_CODE' column
final_df["LCA_CASE_SOC_CODE"] = (
final_df["LCA_CASE_SOC_CODE"].astype(str).apply(lambda x: x.split(".")[0])
)
# function to correct the LCA_CASE_SOC_CODE
def preprocess_column(column):
pattern = r"^\d{2}-\d{4}$" # regex pattern for "XX-XXXX" format
def preprocess_value(value):
if ("-" not in value) and len(value) < 6:
cleaned_value = np.nan
elif "-" in value:
value = value.replace("-", "")
cleaned_value = value[0:2] + "-" + value[2:6]
if len(cleaned_value) != 7:
cleaned_value = np.nan
elif ("-" not in value) and len(value) > 5:
value = value.replace("/", "")
cleaned_value = value[0:2] + "-" + value[2:6]
return cleaned_value
cleaned_column = column.apply(
lambda x: np.nan
if pd.isna(x)
else (x if re.search(pattern, str(x)) else preprocess_value(x))
)
return cleaned_column
final_df["LCA_CASE_SOC_CODE"] = preprocess_column(final_df["LCA_CASE_SOC_CODE"])
final_df.head()
# #### preprocessing LCA_CASE_WAGE_RATE_FROM column: keep the lower bound of wage ranges
# Replace the values in the 'LCA_CASE_WAGE_RATE_FROM' column
# define a custom function to preprocess the wage_rate column
def preprocess_wage_rate(cell_value):
if isinstance(cell_value, float):
return cell_value
elif "-" in cell_value:
return cell_value.split("-")[0].strip()
else:
return cell_value
# apply the custom function to the wage_rate column
final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply(
lambda x: preprocess_wage_rate(x)
)
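# Hedged illustration of preprocess_wage_rate on made-up values:
# wage ranges keep only their lower bound, plain values pass through unchanged.
print(preprocess_wage_rate("60000 - 80000"))  # -> '60000'
print(preprocess_wage_rate(72000.0))  # -> 72000.0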
final_df.head()
for i in final_df.index:
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Month":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 12
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Week":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 52
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Bi-Weekly":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 26
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Hour":
if final_df.loc[i, "FULL_TIME_POS"] == "N":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 35 * 52
)
else:
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 40 * 52
)
final_df.LCA_CASE_WAGE_RATE_UNIT.replace(
["Bi-Weekly", "Month", "Week", "Hour"],
["Year", "Year", "Year", "Year"],
inplace=True,
)
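# A hedged, self-contained sketch of the same annualization rule in vectorized form.
# It runs on a tiny made-up DataFrame (values are hypothetical) so it does not touch final_df.
_demo = pd.DataFrame(
    {
        "LCA_CASE_WAGE_RATE_FROM": [5000.0, 1500.0, 30.0, 90000.0],
        "LCA_CASE_WAGE_RATE_UNIT": ["Month", "Week", "Hour", "Year"],
        "FULL_TIME_POS": ["Y", "Y", "N", "Y"],
    }
)
# part-time hourly positions are assumed to work 35 hours/week, full-time 40 hours/week
_hours_per_year = np.where(_demo["FULL_TIME_POS"] == "N", 35 * 52, 40 * 52)
_annual = np.select(
    [
        _demo["LCA_CASE_WAGE_RATE_UNIT"] == "Month",
        _demo["LCA_CASE_WAGE_RATE_UNIT"] == "Week",
        _demo["LCA_CASE_WAGE_RATE_UNIT"] == "Bi-Weekly",
        _demo["LCA_CASE_WAGE_RATE_UNIT"] == "Hour",
    ],
    [
        _demo["LCA_CASE_WAGE_RATE_FROM"] * 12,
        _demo["LCA_CASE_WAGE_RATE_FROM"] * 52,
        _demo["LCA_CASE_WAGE_RATE_FROM"] * 26,
        _demo["LCA_CASE_WAGE_RATE_FROM"] * _hours_per_year,
    ],
    default=_demo["LCA_CASE_WAGE_RATE_FROM"],
)
print(_annual)  # expected: [60000. 78000. 54600. 90000.]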
# #### scraping data for SOC name from website
# initialize webdriver
driver = webdriver.Chrome()
# navigate to webpage
driver.get("https://www.bls.gov/oes/current/oes_stru.htm#29-0000")
# find all li elements
li_elements = driver.find_elements("xpath", "//li")
# create empty list to store data
data = []
# loop through li elements
for li in li_elements:
text = li.text
if "-" in text:
# use regular expression to extract SOC code and occupation name
words = text.split()
soc = words[0]
name = (" ".join(words[1::])).replace('"', "").strip()
name_list = words[1::]
if "-" in name:
for i, word in enumerate(name_list):
if ("-" in word) and (len(word) > 1):
name = (" ".join(name_list[:i])).replace('"', "").strip()
break
data.append({"SOC Code": soc, "Occupation Name": name})
# close webdriver
driver.quit()
# create dataframe
occupation_data = pd.DataFrame(data)
# save dataframe as CSV
occupation_data.to_csv("occupations.csv", index=False)
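# Hedged walkthrough of the name-truncation rule used in the scraping loop above,
# applied to a made-up li string that contains a nested sub-entry.
_text = "15-1130 Software Developers and Programmers 15-1131 Computer Programmers"
_words = _text.split()
_name_list = _words[1:]
_name = " ".join(_name_list)
for _i, _word in enumerate(_name_list):
    if ("-" in _word) and (len(_word) > 1):
        _name = " ".join(_name_list[:_i])
        break
print(_words[0], "->", _name)  # -> 15-1130 -> Software Developers and Programmers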
final_df.isna().sum()
# reading the occupation data to impute the missing soc_name
occupation_data = pd.read_csv("occupations.csv")
occupation_data
# ### treating the null values
# #### imputing SOC name NaN values by referring to the scraped data
# create a separate dataframe with NaN values
nan_df = final_df[final_df["LCA_CASE_SOC_NAME"].isna()]
mask = final_df["LCA_CASE_SOC_NAME"].isna()
df_nan = final_df[mask]
unique_null_soc_code = list(df_nan["LCA_CASE_SOC_CODE"].unique())
print(unique_null_soc_code)
unique_null_soc_code = [x for x in unique_null_soc_code if type(x) != float]
print(unique_null_soc_code)
# loop through all the SOC code in unique_null_soc_code list
for i in unique_null_soc_code:
try:
# assuming your dataframe is called 'df'
occupation_name = occupation_data.loc[
occupation_data["SOC Code"] == i, "Occupation Name"
].iloc[0]
except:
# if there is no index with the particular soc code the occupation name will be null
occupation_name = np.nan
# create a boolean mask for the conditions
mask = (final_df["LCA_CASE_SOC_NAME"].isna()) & (final_df["LCA_CASE_SOC_CODE"] == i)
# update the LCA_CASE_SOC_NAME column for the filtered rows
final_df.loc[mask, "LCA_CASE_SOC_NAME"] = occupation_name
final_df.isna().sum()
# #### replacing the remaining NaN values in other columns
final_df["LCA_CASE_EMPLOYER_NAME"].fillna("Unknown", inplace=True)
final_df["LCA_CASE_EMPLOYER_STATE"].fillna("Unknown", inplace=True)
final_df["LCA_CASE_EMPLOYER_CITY"].fillna("Unknown", inplace=True)
final_df["LCA_CASE_SOC_CODE"].fillna("Unknown", inplace=True)
final_df["LCA_CASE_SOC_NAME"].fillna("Unknown", inplace=True)
final_df["LCA_CASE_WAGE_RATE_FROM"].fillna(0, inplace=True)
# ### dropping unwanted columns
final_df.columns
final_df = final_df.drop(
[
"LCA_CASE_EMPLOYMENT_START_DATE",
"LCA_CASE_EMPLOYMENT_END_DATE",
"LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POS",
"LCA_CASE_NAICS_CODE",
],
axis=1,
)
final_df.columns
# recode INVALIDATED and REJECTED statuses as DENIED
final_df.loc[(final_df["STATUS"] == "INVALIDATED"), "STATUS"] = "DENIED"
final_df.loc[(final_df["STATUS"] == "REJECTED"), "STATUS"] = "DENIED"
# #### preprocessing LCA_CASE_SOC_NAME column
# Convert SOC names to lowercase
final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.lower()
# strip a trailing 's' to normalize plural and singular SOC names
final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.rstrip("s")
# #### preprocessing LCA_CASE_WAGE_RATE_FROM column
final_df["LCA_CASE_WAGE_RATE_FROM"]
final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].replace(
"Unknown", 0
)
final_df.head()
# convert the column to numeric, then fill NaN values with 0
final_df["LCA_CASE_WAGE_RATE_FROM"] = pd.to_numeric(
final_df["LCA_CASE_WAGE_RATE_FROM"], errors="coerce"
).fillna(0)
# divide wages by 1000 to express them in thousands of dollars per year
final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"] / 1000
# converting column type to int
final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].astype(int)
final_df["LCA_CASE_WAGE_RATE_FROM"].describe()
final_df = final_df.loc[final_df["LCA_CASE_WAGE_RATE_FROM"] > 0]
# Determine the 0.1 and 0.99 quantiles to remove the outliers
q1 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.1)
q99 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.99)
# Filter the DataFrame to remove any values outside of the 0.1 and 0.99 quantiles
final_df = final_df.loc[
(final_df["LCA_CASE_WAGE_RATE_FROM"] >= q1)
& (final_df["LCA_CASE_WAGE_RATE_FROM"] <= q99)
]
final_df["LCA_CASE_WAGE_RATE_FROM"].describe()
final_df["LCA_CASE_WAGE_RATE_FROM"].describe()
# ## ----------------------------------------------------------------------------------------
# # --------------------------------Analysis-----------------------------------
# ## What is the total number of H-1B visa applications and what is the growth rate of the applications over the past three years?
# Calculate number of applications and growth rate per year
year_count = (
final_df.loc[(final_df["year"] >= 2013)]["year"]
.value_counts()
.reset_index()
.rename(columns={"index": "year", "year": "count"})
)
year_count = year_count.sort_values("year")
year_count["growth_rate"] = year_count["count"].pct_change() * 100
# Create bar chart and line chart for growth rate
fig = go.Figure()
fig.add_trace(
go.Bar(x=year_count["year"], y=year_count["count"], name="Number of Applications")
)
fig.add_trace(
go.Scatter(
x=year_count["year"],
y=year_count["growth_rate"],
name="Growth Rate",
yaxis="y2",
)
)
# Set axis titles and layout
fig.update_layout(
title="Number of Applications and Growth Rate per Year",
xaxis_title="Year",
yaxis_title="Number of Applications",
yaxis2=dict(side="right", overlaying="y", title="Growth Rate (%)"),
)
fig.update_xaxes(tickvals=["2013", "2014", "2015", "2016"])
# Add growth rate as text to each bar
for i in range(len(year_count)):
fig.add_annotation(
x=year_count["year"][i],
y=year_count["count"][i],
text=f"{year_count['growth_rate'][i]:.1f}%",
showarrow=False,
font=dict(size=10),
yshift=5,
)
# Show the chart
fig.show()
#
# ##### The analysis shows that there was a significant increase in the number of applications between 2014 and 2015, with a growth rate of 17.7%. However, the application rate saw a sudden drop of 9% in 2016. Further investigation is needed to determine if this drop is due to an increase in rejection rates or other factors.
# ## ----------------------------------------------------------------------------------------
# ## What caused the sudden drop in the application rate? Is it due to an increase in rejection rates, or were other factors contributing to this decline?
# Group the data by year and status to get the total count
df_grouped = (
final_df.loc[(final_df["year"] >= 2014)]
.groupby(["year", "STATUS"])
.count()["LCA_CASE_NUMBER"]
.reset_index()
)
# Calculate the total count for each year
df_year_count = df_grouped.groupby("year").sum()["LCA_CASE_NUMBER"].reset_index()
# Add a column to the grouped dataframe with the percentage of each stack
df_grouped["percentage"] = df_grouped.apply(
lambda row: str(
round(
row["LCA_CASE_NUMBER"]
/ df_year_count[df_year_count["year"] == row["year"]][
"LCA_CASE_NUMBER"
].values[0]
* 100,
2,
)
)
+ "%",
axis=1,
)
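# An equivalent percentage computation (illustrative sketch, not part of the original
# notebook) using a groupby transform instead of the row-wise apply above; it writes
# to a new column so the original "percentage" column is left untouched.
totals = df_grouped.groupby("year")["LCA_CASE_NUMBER"].transform("sum")
df_grouped["percentage_alt"] = (
    (df_grouped["LCA_CASE_NUMBER"] / totals * 100).round(2).astype(str) + "%"
)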
# Create the stacked bar chart
fig = go.Figure()
for status in df_grouped["STATUS"].unique():
df_filtered = df_grouped[df_grouped["STATUS"] == status]
fig.add_trace(
go.Bar(
y=df_filtered["year"],
x=df_filtered["LCA_CASE_NUMBER"],
name=status,
text=df_filtered["percentage"],
textposition="auto",
orientation="h",
marker_color=px.colors.qualitative.Plotly[len(fig.data)],
)
)
# Set axis titles and layout
fig.update_layout(
title="Total count by year and status",
yaxis_title="Year",
xaxis_title="Total count",
barmode="stack",
)
fig.update_yaxes(tickvals=["2014", "2015", "2016"])
# Show the chart
fig.show()
# ##### According to the H1B visa data analysis, it has been observed that the rejection rate for the visa has decreased significantly from 5.41% to 3.4% over the years. On the other hand, the acceptance rate has been steadily increasing every year. This could be an indication of the US government's more favorable policies towards H1B visa applications, resulting in a higher acceptance rate. It may also suggest that employers have become more adept at submitting strong applications, thereby reducing the rejection rate.
# ## ----------------------------------------------------------------------------------------
# ## What are the top sectors for H1B visa applications?
# Group the data by employer sector to get the count of each sector
df_grouped = final_df.groupby("EMPLOYER_SECTOR").size().reset_index(name="count")
# Create the pie chart
fig = px.pie(
df_grouped,
values="count",
names="EMPLOYER_SECTOR",
title="Employer sector distribution",
hole=0.5,
)
# Show the chart
fig.show()
# ##### Based on our analysis, we have found that a significant proportion of H1B visa applications, approximately 72.4%, were related to the professional, scientific, and technical services sector, which includes fields such as computer programming, scientific research, engineering, and consulting services. This high number of applications can be attributed to the high demand for skilled professionals in these fields, as they require specialized expertise and knowledge.
# ##### Moreover, it is also possible that larger companies have been contributing to this trend by sponsoring more H1B visas for their employees, particularly in the professional, scientific, and technical services sector. This may be due to the fact that these companies require highly skilled workers to maintain their competitive edge and growth in the industry.
# ##### Further analysis is needed to investigate whether the concentration of H1B visa applications in the professional, scientific, and technical services sector is due to other factors such as pay scales, availability of skilled labor, or any regulatory changes affecting the industry.
# ## ----------------------------------------------------------------------------------------
# ## Which are the top 10 employers with the highest number of H1B visa applications, and in which sectors do they belong?
import plotly.graph_objects as go
import pandas as pd
# Group the data by employer name and status to get the total count
df_grouped = (
final_df.groupby(["LCA_CASE_EMPLOYER_NAME", "STATUS", "EMPLOYER_SECTOR"])
.count()["LCA_CASE_NUMBER"]
.reset_index()
)
# Get the top 10 employers based on application count
top_employers = (
df_grouped.groupby("LCA_CASE_EMPLOYER_NAME")
.sum()
.sort_values("LCA_CASE_NUMBER", ascending=False)
.head(10)
.reset_index()["LCA_CASE_EMPLOYER_NAME"]
.to_list()
)
# Filter the data for top 10 employers
df_top_employers = df_grouped[df_grouped["LCA_CASE_EMPLOYER_NAME"].isin(top_employers)]
# Create the stacked bar chart
fig = go.Figure()
for status in df_top_employers["STATUS"].unique():
df_filtered = df_top_employers[df_top_employers["STATUS"] == status]
fig.add_trace(
go.Bar(
x=df_filtered["LCA_CASE_NUMBER"],
y=df_filtered["LCA_CASE_EMPLOYER_NAME"],
name=status,
orientation="h",
marker_color=px.colors.qualitative.T10[len(fig.data)],
text=df_filtered["EMPLOYER_SECTOR"],
textposition="inside",
)
)
# Set axis titles and layout
fig.update_layout(
title="Top 10 Employers by Total Application Count",
xaxis_title="Total Application Count",
yaxis_title="Employer Name",
barmode="stack",
yaxis={"categoryorder": "total ascending"},
)
# Change color palette
fig.update_traces(marker=dict(line=dict(color="yellow", width=0.5)))
# Show the chart
fig.show()
# ##### Based on the analysis, it is found that 9 out of the top 10 employers with the highest number of H1B visa applications belong to the professional, scientific, and technical services sector. This sector is known to have a high demand for skilled professionals, and it includes fields such as computer programming, scientific research, engineering, and consulting services.
# ##### It is interesting to note that Infosys has the highest number of approved applications with 82,271 and the least number of denied applications among the top 10 employers. Furthermore, Infosys has played a significant role in the H1B visa application count, surpassing the second-ranked TCS and the third-ranked Wipro combined. This raises the question of what strategies Infosys might have implemented to achieve this level of success and what type of roles they are recruiting for.
# ## ----------------------------------------------------------------------------------------
# ## How much of an impact do the top employers have on the distribution of job positions for H1B visas?
# Create a list of top employers
top_employers = [
"INFOSYS LIMITED",
"TATA CONSULTANCY SERVICES LIMITED",
"WIPRO LIMITED",
"IBM INDIA PRIVATE LIMITED",
"ACCENTURE LLP",
"DELOITTE CONSULTING LLP",
"CAPGEMINI AMERICA INC",
"HCL AMERICA, INC.",
"MICROSOFT CORPORATION",
"ERNST & YOUNG U.S. LLP",
]
# Create a new column in final_df indicating whether the employer is in the top_employers list or not
final_df["EMPLOYER_GROUP"] = final_df["LCA_CASE_EMPLOYER_NAME"].apply(
lambda x: x if x in top_employers else "Other Employers"
)
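# A vectorized alternative (illustrative sketch, not part of the original notebook)
# using Series.isin with np.where instead of the row-wise apply above; it produces
# the same EMPLOYER_GROUP values.
final_df["EMPLOYER_GROUP"] = np.where(
    final_df["LCA_CASE_EMPLOYER_NAME"].isin(top_employers),
    final_df["LCA_CASE_EMPLOYER_NAME"],
    "Other Employers",
)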
# Group by LCA_CASE_SOC_NAME and LCA_CASE_EMPLOYER_NAME and get the count
df_grouped = (
final_df.groupby(["LCA_CASE_SOC_NAME", "EMPLOYER_GROUP"])
.size()
.reset_index(name="count")
)
# Filter out the top 10 LCA_CASE_SOC_NAME
df_top10 = (
df_grouped.groupby("LCA_CASE_SOC_NAME")
.sum()
.reset_index()
.sort_values(by="count", ascending=False)
.head(10)["LCA_CASE_SOC_NAME"]
.tolist()
)
df_filtered = df_grouped[df_grouped["LCA_CASE_SOC_NAME"].isin(df_top10)]
# Create the stacked bar chart
fig = px.bar(
df_filtered,
x="count",
y="LCA_CASE_SOC_NAME",
color="EMPLOYER_GROUP",
orientation="h",
)
# Add axis labels and title
fig.update_layout(
title="Top 10 LCA_CASE_SOC_NAME with employer group",
xaxis_title="Count",
yaxis_title="LCA_CASE_SOC_NAME",
)
fig.update_layout(yaxis={"categoryorder": "total ascending"})
# Show the chart
fig.show()
# ##### Upon analyzing the data, a chart was created to visualize the contribution of the top 10 H1B visa sponsoring employers to the top 10 job positions. The remaining employers were grouped as "other employers." The chart reveals that even though "other employers" occupy a significant portion of the chart, the top 10 employers have made a substantial contribution to the top 10 job positions.
# ##### For instance, Infosys has made a significant contribution to the computer systems analyst position, while Microsoft has made a significant contribution to the Software developers, application position. Similarly, IBM has made a considerable contribution to the computer programmer, applications position.
# ##### The chart also suggests that the top 10 employers have a significant impact on the H1B visa application process and the job positions that are filled.
# ## ----------------------------------------------------------------------------------------
# ## To what extent does the salary range affect the approval or denial of H1B visa applications for these job positions?
# Filter the data to only include certified cases
df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"]
# Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM
df_grouped = (
df_filtered.groupby("LCA_CASE_SOC_NAME")
.agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"})
.reset_index()
)
# Sort by count in descending order and get the top 10
df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10)
# Create the bar chart for top 10 LCA_CASE_SOC_NAME
fig = px.bar(
df_top10,
x="LCA_CASE_SOC_NAME",
y="LCA_CASE_NUMBER",
labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"},
)
# Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM
fig.add_trace(
go.Scatter(
x=df_top10["LCA_CASE_SOC_NAME"],
y=df_top10["LCA_CASE_WAGE_RATE_FROM"],
yaxis="y2",
name="Average Wage Rate in thousands",
marker=dict(color="red"),
)
)
# Set the layout for the chart
fig.update_layout(
title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average wage rate",
xaxis_title="LCA_CASE_SOC_NAME",
yaxis_title="Count",
yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"),
)
# Show the chart
fig.show()
# Filter the data to only include denied cases
df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"]
# Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM
df_grouped = (
df_filtered.groupby("LCA_CASE_SOC_NAME")
.agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"})
.reset_index()
)
# Sort by count in descending order and get the top 10
df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10)
# Create the bar chart for top 10 LCA_CASE_SOC_NAME
fig = px.bar(
df_top10,
x="LCA_CASE_SOC_NAME",
y="LCA_CASE_NUMBER",
labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"},
)
# Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM
fig.add_trace(
go.Scatter(
x=df_top10["LCA_CASE_SOC_NAME"],
y=df_top10["LCA_CASE_WAGE_RATE_FROM"],
yaxis="y2",
name="Average Wage Rate in thousands",
marker=dict(color="red"),
)
)
# Set the layout for the chart
fig.update_layout(
title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average wage rate",
xaxis_title="LCA_CASE_SOC_NAME",
yaxis_title="Count",
yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"),
)
# Show the chart
fig.show()
# calculate the quantiles
q1, q2, q3, q4 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile([0.25, 0.5, 0.75, 0.99])
# define the label function
def label_wage_range(x):
if x > 0 and x <= q1:
return "Below Q1 (64K)"
elif x > q1 and x <= q2:
return "Q1 (64K - 75K)"
elif x > q2 and x <= q3:
return "Q2 (75K - 95K)"
elif x > q3 and x <= q4:
return "Q3 (95K - 180K)"
elif x > q4:
return "Above Q4 (180K)"
# create the new column using apply() and the label function
final_df["wage_range"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply(label_wage_range)
df_filtered = final_df
# Create a grouped bar chart using the quantile column and the STATUS column as color
fig = px.histogram(df_filtered, x="wage_range", color="STATUS", barmode="group")
# Set the layout for the chart
fig.update_layout(
title="Distribution of Wage Rates by Status and Quantile",
xaxis_title="Quantile",
yaxis_title="Count",
)
# Show the chart
fig.show()
# ##### Upon analyzing the data for the top 10 accepted and denied job positions, no significant correlation was observed between the salary range and the application status. Interestingly, both groups of positions fell within essentially the same salary range.
# ##### To further investigate the relationship between salary range and application status, the salary range was split into four quantiles. After analyzing the data for low, average, above average, and higher pay salary levels, it was found that the majority of H1B visa applications that were approved fell into the low (Q1) and average (Q2) salary range categories. However, there were no clear trends observed between the salary range and the application status, suggesting that factors other than salary may have played a more significant role in determining the outcome of H1B visa applications.
# ## ----------------------------------------------------------------------------------------
# ## How does the length of employment impact the decision of the H1B visa application?
# Filter the data to only include certified cases
df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"]
# Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_EMPLOYMENT_PERIOD
df_grouped = (
df_filtered.groupby("LCA_CASE_SOC_NAME")
.agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"})
.reset_index()
)
# Sort by count in descending order and get the top 10
df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10)
# Create the bar chart for top 10 LCA_CASE_SOC_NAME
fig = px.bar(
df_top10,
x="LCA_CASE_SOC_NAME",
y="LCA_CASE_NUMBER",
labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"},
)
# Add the scatter chart for mean of LCA_CASE_EMPLOYMENT_PERIOD
fig.add_trace(
go.Scatter(
x=df_top10["LCA_CASE_SOC_NAME"],
y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"],
yaxis="y2",
name="Average Employment period in months",
marker=dict(color="red"),
)
)
# Set the layout for the chart
fig.update_layout(
title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average employment period",
xaxis_title="LCA_CASE_SOC_NAME",
yaxis_title="Count",
yaxis2=dict(title="Average employment period", overlaying="y", side="right"),
)
# Show the chart
fig.show()
# Filter the data to only include denied cases
df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"]
# Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_EMPLOYMENT_PERIOD
df_grouped = (
df_filtered.groupby("LCA_CASE_SOC_NAME")
.agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"})
.reset_index()
)
# Sort by count in descending order and get the top 10
df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10)
# Create the bar chart for top 10 LCA_CASE_SOC_NAME
fig = px.bar(
df_top10,
x="LCA_CASE_SOC_NAME",
y="LCA_CASE_NUMBER",
labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"},
)
# Add the scatter chart for mean of LCA_CASE_EMPLOYMENT_PERIOD
fig.add_trace(
go.Scatter(
x=df_top10["LCA_CASE_SOC_NAME"],
y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"],
yaxis="y2",
name="Average Employment Period in months",
marker=dict(color="red"),
)
)
# Set the layout for the chart
fig.update_layout(
title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average employment period",
xaxis_title="LCA_CASE_SOC_NAME",
yaxis_title="Count",
yaxis2=dict(
title="Average Employment Period in months", overlaying="y", side="right"
),
)
# Show the chart
fig.show()
mean = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].mean()
# define the label function
def label_employment_range(x):
if x < mean:
return "Below average (34 months)"
elif x > mean:
return "Above average (34 months)"
# create the new column using apply() and the label function
final_df["Employment_range"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].apply(
label_employment_range
)
df_filtered = final_df
# Create a grouped bar chart using the quantile column and the STATUS column as color
fig = px.histogram(df_filtered, x="Employment_range", color="STATUS", barmode="group")
# Set the layout for the chart
fig.update_layout(
title="Distribution of Employment_range by Status and Mean of Employment period",
xaxis_title="Quantile",
yaxis_title="Count",
)
# Show the chart
fig.show()
# ##### After analyzing the data, it was found that the employment period also did not have any significant correlation with the decision of the visa. Both the top approved and the top denied job positions had an average employment period of 33 months.
# ##### Upon further investigation, the applications were split into those below and those above the average employment period of 33 months. It was observed that only a few applications had an employment period of less than 33 months. This suggests that the employment period may not have played a significant role in determining the outcome of H1B visa applications. Other factors such as the applicant's qualifications and the employer's sponsorship may have had a greater impact on the decision.
# ## ----------------------------------------------------------------------------------------
# ## Are there any trends or patterns in the geographic distribution of H1B visa workers?
# Group by state and get the count
df_state = (
final_df.groupby("LCA_CASE_EMPLOYER_STATE")["LCA_CASE_NUMBER"].count().reset_index()
)
# Create the choropleth map
fig = px.choropleth(
df_state,
locations="LCA_CASE_EMPLOYER_STATE",
locationmode="USA-states",
color="LCA_CASE_NUMBER",
scope="usa",
color_continuous_scale="Blues",
title="H1B Visa Applications by State",
)
fig.show()
import pandas as pd
import numpy as np
from selenium import webdriver
import re
import plotly.graph_objs as go
import plotly.express as px
pd.options.mode.chained_assignment = None
data_one = pd.read_csv("H-1B_2014.csv")
data_two = pd.read_csv("H-1B_2015.csv")
data_three = pd.read_csv("H-1B_2016.csv")
columns = [
"LCA_CASE_NUMBER",
"STATUS",
"LCA_CASE_SUBMIT",
"DECISION_DATE",
"VISA_CLASS",
"LCA_CASE_EMPLOYMENT_START_DATE",
"LCA_CASE_EMPLOYMENT_END_DATE",
"LCA_CASE_EMPLOYER_NAME",
"LCA_CASE_EMPLOYER_STATE",
"LCA_CASE_EMPLOYER_CITY",
"LCA_CASE_SOC_CODE",
"LCA_CASE_SOC_NAME",
"LCA_CASE_JOB_TITLE",
"LCA_CASE_WAGE_RATE_FROM",
"LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POS",
"LCA_CASE_NAICS_CODE",
]
# renaming the columns
data_two = data_two.rename(
columns={
"CASE_NUMBER": "LCA_CASE_NUMBER",
"CASE_STATUS": "STATUS",
"CASE_SUBMITTED": "LCA_CASE_SUBMIT",
"DECISION_DATE": "DECISION_DATE",
"EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE",
"EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE",
"EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME",
"EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE",
"EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY",
"SOC_CODE": "LCA_CASE_SOC_CODE",
"SOC_NAME": "LCA_CASE_SOC_NAME",
"JOB_TITLE": "LCA_CASE_JOB_TITLE",
"WAGE_RATE_OF_PAY": "LCA_CASE_WAGE_RATE_FROM",
"WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POSITION": "FULL_TIME_POS",
"NAIC_CODE": "LCA_CASE_NAICS_CODE",
}
)
# renaming the columns
data_three = data_three.rename(
columns={
"CASE_NUMBER": "LCA_CASE_NUMBER",
"CASE_STATUS": "STATUS",
"CASE_SUBMITTED": "LCA_CASE_SUBMIT",
"DECISION_DATE": "DECISION_DATE",
"EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE",
"EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE",
"EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME",
"EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE",
"EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY",
"SOC_CODE": "LCA_CASE_SOC_CODE",
"SOC_NAME": "LCA_CASE_SOC_NAME",
"JOB_TITLE": "LCA_CASE_JOB_TITLE",
"WAGE_RATE_OF_PAY_FROM": "LCA_CASE_WAGE_RATE_FROM",
"WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT",
"FULL_TIME_POSITION": "FULL_TIME_POS",
"NAIC_CODE": "LCA_CASE_NAICS_CODE",
}
)
# concat the three dataframes
final_df = pd.concat(
[data_one[columns], data_two[columns], data_three[columns]]
).reset_index(drop=True)
final_df.head()
final_df.columns
final_df.info()
(final_df.isnull().sum())
final_df.head()
# ## Feature engineering
# ### creating employment period column
# convert date columns to datetime format and assign nan to invalid data
final_df["LCA_CASE_EMPLOYMENT_START_DATE"] = pd.to_datetime(
final_df["LCA_CASE_EMPLOYMENT_START_DATE"], errors="coerce"
)
final_df["LCA_CASE_EMPLOYMENT_END_DATE"] = pd.to_datetime(
final_df["LCA_CASE_EMPLOYMENT_END_DATE"], errors="coerce"
)
# subtract LCA_CASE_EMPLOYMENT_START_DATE from LCA_CASE_EMPLOYMENT_END_DATE to find the employment period
LCA_CASE_EMPLOYMENT_PERIOD = (
final_df["LCA_CASE_EMPLOYMENT_END_DATE"]
- final_df["LCA_CASE_EMPLOYMENT_START_DATE"]
)
# create a new column with LCA_CASE_EMPLOYMENT_PERIOD value
final_df.insert(7, "LCA_CASE_EMPLOYMENT_PERIOD", LCA_CASE_EMPLOYMENT_PERIOD)
final_df.head()
# converting LCA_CASE_EMPLOYMENT_PERIOD into days format
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].dt.days
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# drop invalid rows, i.e. rows whose employment period is zero, negative, or missing
final_df = final_df[final_df["LCA_CASE_EMPLOYMENT_PERIOD"] > 0]
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# - The value of 30.44 is derived by calculating the average number of days in a month over a period of four years.
# - The average number of days in a year is 365.24 (due to leap years), so dividing by 12 gives an average of 30.44 days per month. This is a commonly used approximation in calculations involving months and days.
# - Using 30.44 as the number of days in a month provides a more accurate estimate when converting between months and days, rather than assuming that each month has exactly 30 or 31 days.
#
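# Quick sanity check of the 30.44 figure (illustrative only, not part of the original
# notebook): a four-year leap cycle has 3*365 + 366 = 1461 days spread over 48 months,
# i.e. about 30.44 days per month.
print(1461 / 48)            # 30.4375
print(round(1095 / 30.44))  # a three-year (1095-day) period maps to 36 months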
# the employment period is converted into months
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = round(
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] / 30.44
)
# filled the missing value with 0 and converted the column type to int
final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = (
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].fillna(0).astype(int)
)
# ### creating visa decision duration column
# convert date columns to datetime format and assign nan to invalid data
final_df["LCA_CASE_SUBMIT"] = pd.to_datetime(
final_df["LCA_CASE_SUBMIT"], errors="coerce"
)
final_df["DECISION_DATE"] = pd.to_datetime(final_df["DECISION_DATE"], errors="coerce")
# subtract the LCA_CASE_SUBMIT from DECISION_DATE to find visa decision period
LCA_CASE_DECISION_PERIOD = final_df["DECISION_DATE"] - final_df["LCA_CASE_SUBMIT"]
# create a new column with LCA_CASE_DECISION_PERIOD value
final_df.insert(4, "LCA_CASE_DECISION_PERIOD", LCA_CASE_DECISION_PERIOD)
final_df["LCA_CASE_DECISION_PERIOD"] = final_df["LCA_CASE_DECISION_PERIOD"].dt.days
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe()
# remove special characters (including spaces) from LCA_CASE_EMPLOYER_CITY
final_df["LCA_CASE_EMPLOYER_CITY"] = final_df["LCA_CASE_EMPLOYER_CITY"].replace(
{"[^a-zA-Z0-9]": ""}, regex=True
)
# ### find the sectors of the company using the NAICS code
# Convert the LCA_CASE_NAICS_CODE column to string data type
final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].astype(str)
# Extract the first two digits of each string value
final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].str[:2]
naics_unique_values = final_df["LCA_CASE_NAICS_CODE"].unique()
# reading the NAICS_data to cross check and create a new column for employer sector
NAICS_data = pd.read_csv("NAICS_data.csv")
NAICS_data.head()
# loop through all the NAICS prefixes in naics_unique_values
for i in naics_unique_values:
    try:
        # look up the sector title for this two-digit NAICS prefix
        NAICS_data_code = NAICS_data.loc[
            NAICS_data["NAICS_CODE"] == i, "NAICS_TITLE"
        ].iloc[0]
    except IndexError:
        # if the prefix is not present in the NAICS reference data, mark the sector as Unknown
        NAICS_data_code = "Unknown"
    # create a boolean mask for the rows with this NAICS prefix
    mask = final_df["LCA_CASE_NAICS_CODE"] == i
    # update the EMPLOYER_SECTOR column for the filtered rows
    final_df.loc[mask, "EMPLOYER_SECTOR"] = NAICS_data_code
# extract the year component from the datetime column LCA_CASE_SUBMIT and store it in a new column year
final_df["year"] = final_df["LCA_CASE_SUBMIT"].dt.year
# # Preprocessing
# drop duplicates
final_df = final_df.drop_duplicates()
# remove numbers after "." period in 'LCA_CASE_SOC_CODE' column
final_df["LCA_CASE_SOC_CODE"] = (
final_df["LCA_CASE_SOC_CODE"].astype(str).apply(lambda x: x.split(".")[0])
)
# function to correct the LCA_CASE_SOC_CODE
def preprocess_column(column):
pattern = r"^\d{2}-\d{4}$" # regex pattern for "XX-XXXX" format
def preprocess_value(value):
if ("-" not in value) and len(value) < 6:
cleaned_value = np.nan
elif "-" in value:
value = value.replace("-", "")
cleaned_value = value[0:2] + "-" + value[2:6]
if len(cleaned_value) != 7:
cleaned_value = np.nan
elif ("-" not in value) and len(value) > 5:
value = value.replace("/", "")
cleaned_value = value[0:2] + "-" + value[2:6]
return cleaned_value
cleaned_column = column.apply(
lambda x: np.nan
if pd.isna(x)
else (x if re.search(pattern, str(x)) else preprocess_value(x))
)
return cleaned_column
final_df["LCA_CASE_SOC_CODE"] = preprocess_column(final_df["LCA_CASE_SOC_CODE"])
final_df.head()
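# Illustrative check of the cleaning rules on hypothetical inputs (not from the data):
# a well-formed "XX-XXXX" code passes through, digit-only codes are re-hyphenated,
# and codes that are too short become NaN.
print(preprocess_column(pd.Series(["15-1132", "151132.00", "1511"])))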
# #### preprocessing LCA_CASE_WAGE_RATE_FROM column
# Replace the values in the 'LCA_CASE_WAGE_RATE_FROM' column
# define a custom function to preprocess the wage_rate column
def preprocess_wage_rate(cell_value):
if isinstance(cell_value, float):
return cell_value
elif "-" in cell_value:
return cell_value.split("-")[0].strip()
else:
return cell_value
# apply the custom function to the wage_rate column
final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply(
lambda x: preprocess_wage_rate(x)
)
final_df.head()
for i in final_df.index:
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Month":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 12
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Week":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 52
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Bi-Weekly":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 26
)
if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Hour":
if final_df.loc[i, "FULL_TIME_POS"] == "N":
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 35 * 52
)
else:
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = (
final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 40 * 52
)
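# A vectorized sketch of the same annualization (not part of the original notebook).
# It coerces the wage column to numeric first (an extra safety step) and mirrors the
# factors used in the loop above: 35 h/week for part-time hourly rows, 40 h/week otherwise.
wage_num = pd.to_numeric(final_df["LCA_CASE_WAGE_RATE_FROM"], errors="coerce")
unit = final_df["LCA_CASE_WAGE_RATE_UNIT"]
hours_per_week = np.where(final_df["FULL_TIME_POS"] == "N", 35, 40)
factor = np.select(
    [unit == "Month", unit == "Week", unit == "Bi-Weekly", unit == "Hour"],
    [12, 52, 26, hours_per_week * 52],
    default=1,
)
# final_df["LCA_CASE_WAGE_RATE_FROM"] = wage_num * factor  # would replace (not follow) the loop above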
final_df.LCA_CASE_WAGE_RATE_UNIT.replace(
["Bi-Weekly", "Month", "Week", "Hour"],
["Year", "Year", "Year", "Year"],
inplace=True,
)
# #### scraping data for SOC name from website
# initialize webdriver
driver = webdriver.Chrome()
# navigate to webpage
driver.get("https://www.bls.gov/oes/current/oes_stru.htm#29-0000")
# find all li elements
li_elements = driver.find_elements("xpath", "//li")
# create empty list to store data
data = []
# loop through li elements
for li in li_elements:
text = li.text
if "-" in text:
# split the line text to extract the SOC code and occupation name
words = text.split()
soc = words[0]
name = (" ".join(words[1::])).replace('"', "").strip()
name_list = words[1::]
if "-" in name:
for i, word in enumerate(name_list):
if ("-" in word) and (len(word) > 1):
name = (" ".join(name_list[:i])).replace('"', "").strip()
break
data.append({"SOC Code": soc, "Occupation Name": name})
# close webdriver
driver.quit()
# create dataframe
occupation_data = pd.DataFrame(data)
# save dataframe as CSV
occupation_data.to_csv("occupations.csv", index=False)
final_df.isna().sum()
# reading the occupation data to impute the missing soc_name
occupation_data = pd.read_csv("occupations.csv")
occupation_data
| false | 0 | 11,423 | 0 | 11,423 | 11,423 |
||
129627018
|
import nltk
from nltk.collocations import (
TrigramCollocationFinder,
BigramCollocationFinder,
QuadgramCollocationFinder,
)
from nltk.metrics import (
TrigramAssocMeasures,
BigramAssocMeasures,
QuadgramAssocMeasures,
)
# Flatten the per-document token lists (txt is built in the data-loading cell further down) into a single token stream
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
for j in i:
tokens.append(j)
# Create bigram, trigram, and quadgram collocation finders
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Keep only n-grams that occur at least once (raise the threshold to prune rarer n-grams)
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric for each n-gram order
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top 10 n-grams of each order based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 10)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in t:
if i[0] in top_trigrams:
trigrams[" ".join(i[0])] = i[1]
trigrams = {
k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
if i[0] in top_bigrams:
bigrams[" ".join(i[0])] = i[1]
bigrams = {
k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
if i[0] in top_quadgrams:
quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
trigrams
bigrams
quadgrams
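# Equivalent raw frequency counts (illustrative sketch, not part of the original
# notebook) using nltk.util.ngrams with collections.Counter, which avoids the
# finder/top-N bookkeeping above.
from collections import Counter
from nltk.util import ngrams
top10_trigrams_counter = Counter(" ".join(g) for g in ngrams(tokens, 3)).most_common(10)
print(top10_trigrams_counter)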
# print the top 10 trigrams selected by nbest
for n in top_trigrams:
    print(n)
# Print every collocation found by each finder along with its frequency
print("Top Trigrams:")
for trigram in finder_t.ngram_fd.items():
    print(trigram)
print("-------------------------")
print("Top Bigrams:")
for bigram in finder_b.ngram_fd.items():
    print(bigram)
print("-------------------------")
print("Top Quadgrams:")
for quadgram in finder_q.ngram_fd.items():
    print(quadgram)
import re
# Preprocessing
def remove_string_special_characters(s):
    # keep only alphanumerics and whitespace (drop special characters)
    stripped = re.sub(r"[^0-9a-zA-Z\s]", "", s)
    stripped = re.sub("_", "", stripped)
    # collapse any run of whitespace into one space
    stripped = re.sub(r"\s+", " ", stripped)
    # remove leading and trailing white spaces
    stripped = stripped.strip()
    if stripped != "":
        return stripped.lower()
    # note: an empty cleaned string returns None
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize
lemmatizer = WordNetLemmatizer()
# Stopword removal
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
    # clean the raw text; fall back to an empty string if nothing is left after cleaning
    line = remove_string_special_characters(line) or ""
    txt[i] = [x for x in word_tokenize(line) if x not in stop_words]
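# The lemmatizer created above is only exercised on a single token at the bottom of the
# notebook; a sketch (not part of the original) of applying it to every token, written
# to a new variable so the original txt is left untouched.
txt_lemmatized = [[lemmatizer.lemmatize(tok) for tok in doc] for doc in txt]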
import pandas as pd
data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_3.json")
txt = list(data_may1["Text"])
lemmatizer.lemmatize("Rs")
import nltk
from nltk.collocations import (
TrigramCollocationFinder,
BigramCollocationFinder,
QuadgramCollocationFinder,
)
from nltk.metrics import (
TrigramAssocMeasures,
BigramAssocMeasures,
QuadgramAssocMeasures,
)
# Tokenize the sentence
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
for j in i:
tokens.append(j)
# Create a trigram collocation finder
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Filter out common words and punctuation
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top 10 n-grams of each size based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 10)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in t:
if i[0] in top_trigrams:
trigrams[" ".join(i[0])] = i[1]
trigrams = {
k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
if i[0] in top_bigrams:
bigrams[" ".join(i[0])] = i[1]
bigrams = {
k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
if i[0] in top_quadgrams:
quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
trigrams
bigrams
quadgrams
for n in top_trigrams:
    print(n)
# Print the top trigrams
print("Top Trigrams:")
for trigram in finder_t.ngram_fd.items():
print(trigram)
print("-------------------------")
print("Top Bigrams:")
for trigram in finder_b.ngram_fd.items():
print(trigram)
print("-------------------------")
print("Top Quadgrams:")
for trigram in finder_q.ngram_fd.items():
print(trigram)
import re
# Preprocessing
def remove_string_special_characters(s):
    # Replace special characters with ''
    stripped = re.sub(r"[^0-9a-zA-Z\s]", "", s)
    stripped = re.sub("_", "", stripped)
    # Change any white space to one space
    stripped = re.sub(r"\s+", " ", stripped)
# Remove start and end white spaces
stripped = stripped.strip()
if stripped != "":
return stripped.lower()
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize
lemmatizer = WordNetLemmatizer()
# Stopword removal
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
line = remove_string_special_characters(line)
# txt[i] = [x for x in line if ( x not in stop_words )]
txt[i] = [x for x in word_tokenize(line) if (x not in stop_words)]
import pandas as pd
data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_3.json")
txt = list(data_may1["Text"])
lemmatizer.lemmatize("Rs")
| false | 0 | 1,065 | 0 | 1,065 | 1,065 |
||
129720104
|
<jupyter_start><jupyter_text>Predicting Critical Heat Flux
### Context
This dataset was prepared for the journal article entitled "On the prediction of critical heat flux using a physics-informed machine learning-aided framework" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article.
Kaggle dataset identifier: predicting-heat-flux
<jupyter_script># In this notebook we will try to use AutoML to create a model.
# If you like it, please, upvote!
# The EDA with 'classical' parameter tuning is published here: https://www.kaggle.com/kdmitrie/pgs315-simple-imputer-optuna-blending
# **Version info:**
# 5. Initial submit without Deep Learning
# # 0. Import basic libraries and data sources, including the original dataset
import pandas as pd
import numpy as np
import h2o
h2o.init()
from h2o.automl import H2OAutoML
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
TRAIN_CSV = "/kaggle/input/playground-series-s3e15/data.csv"
TEST_CSV = "/kaggle/input/playground-series-s3e15/sample_submission.csv"
EXTERNAL_CSV = "/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv"
# # 1. Prepare the data
# ## 1.1. Read the data
# First we read the CSV files and put all the data into a single dataframe. Though we don't have access to the missing features, with this combination of the datasets we can possibly make better use of the relations between features.
# The `test` column indicates the dataset and the `gen` column indicates whether the data was generated by a deep learning model or not.
target = "x_e_out [-]"
df1 = pd.read_csv(TRAIN_CSV)
df1["test"] = pd.isna(df1[target])
df1["gen"] = 1
df2 = pd.read_csv(EXTERNAL_CSV)
df2["test"] = 0
df2["gen"] = 0
# df = df1 # Do not include the original dataset
df = pd.concat([df1, df2])
df.reset_index(inplace=True)
df.drop("index", axis=1, inplace=True)
df.head()
# We replace each categorical value with the mean of the target for that category (a simple target encoding).
columns = [
"author",
"geometry",
"pressure [MPa]",
"mass_flux [kg/m2-s]",
"D_e [mm]",
"D_h [mm]",
"length [mm]",
"chf_exp [MW/m2]",
]
cat_columns = ["author", "geometry"]
for col in cat_columns:
replace = df.groupby(by=col)[target].mean().to_dict()
df[col].replace(replace, inplace=True)
# # 2. Using H2O
def get_numpy_arrays(data):
X = data.drop(["id", "test", "gen", target], axis=1).to_numpy()
y = data[target].to_numpy()
scaler = StandardScaler()
X = scaler.fit_transform(X)
return X, y
X, y = get_numpy_arrays(df[df.test != 1])
X_submit, y_submit = get_numpy_arrays(df[df.test == 1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
df_train = pd.DataFrame(data=X_train, columns=columns)
df_train[target] = y_train
df_test = pd.DataFrame(data=X_test, columns=columns)
df_test[target] = y_test
data_h2o_train_val = h2o.H2OFrame(df_train)
test_frame = h2o.H2OFrame(df_test)
data_h2o_train_val[target] = data_h2o_train_val[target]
test_frame[target] = test_frame[target]
training_frame, validation_frame = data_h2o_train_val.split_frame(
ratios=[
0.7,
],
seed=42,
)
aml = H2OAutoML(
max_models=100,
seed=12,
exclude_algos=["DeepLearning"],
verbosity=None,
nfolds=10,
sort_metric="RMSE",
stopping_metric="RMSE",
)
# aml = H2OAutoML(max_models=10, seed=12, exclude_algos=[], verbosity=None, nfolds=5)
aml.train(
x=columns,
y=target,
training_frame=training_frame,
validation_frame=validation_frame,
)
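# Optionally, inspect the AutoML leaderboard to see how the candidate models
# compare on the validation metric (a quick check only; the prediction below
# uses the leader model either way):
lb = aml.leaderboard
lb.head(rows=10)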
# # 3. Predict & submit
df_submit = pd.DataFrame(data=X_submit, columns=columns)
data_h2o_submit = h2o.H2OFrame(df_submit)
prediction = aml.leader.predict(data_h2o_submit).as_data_frame()["predict"].to_numpy()
data_submit = df[df.test == 1].copy()
data_submit[target] = prediction
data_submit[["id", target]].to_csv("submission_h2o.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720104.ipynb
|
predicting-heat-flux
|
saurabhshahane
|
[{"Id": 129720104, "ScriptId": 38088939, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4308868, "CreationDate": "05/16/2023 02:39:52", "VersionNumber": 5.0, "Title": "PGS315: Easy use of H2O AutoML", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 109.0, "LinesInsertedFromPrevious": 31.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 78.0, "LinesInsertedFromFork": 54.0, "LinesDeletedFromFork": 526.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 55.0, "TotalVotes": 1}]
|
[{"Id": 186058942, "KernelVersionId": 129720104, "SourceDatasetVersionId": 1921393}]
|
[{"Id": 1921393, "DatasetId": 1145869, "DatasourceVersionId": 1959907, "CreatorUserId": 2411256, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "02/08/2021 11:44:07", "VersionNumber": 1.0, "Title": "Predicting Critical Heat Flux", "Slug": "predicting-heat-flux", "Subtitle": "prediction of critical heat flux using Machine Learning", "Description": "### Context\n\nThis dataset was prepared for the journal article entitled \"On the prediction of critical heat flux using a physics-informed machine learning-aided framework\" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. \n\n### Acknowledgements\n\nZhao, Xingang (2020), \u201cData for: On the prediction of critical heat flux using a physics-informed machine learning-aided framework\u201d, Mendeley Data, V1, doi: 10.17632/5p5h37tyv7.1", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1145869, "CreatorUserId": 2411256, "OwnerUserId": 2411256.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1921393.0, "CurrentDatasourceVersionId": 1959907.0, "ForumId": 1163376, "Type": 2, "CreationDate": "02/08/2021 11:44:07", "LastActivityDate": "02/08/2021", "TotalViews": 6889, "TotalDownloads": 589, "TotalVotes": 42, "TotalKernels": 78}]
|
[{"Id": 2411256, "UserName": "saurabhshahane", "DisplayName": "Saurabh Shahane", "RegisterDate": "10/26/2018", "PerformanceTier": 4}]
|
# In this notebook we will try to use AutoML to create a model.
# If you like it, please, upvote!
# The EDA with 'classical' parameter tuning is published here: https://www.kaggle.com/kdmitrie/pgs315-simple-imputer-optuna-blending
# **Version info:**
# 5. Initial submit without Deep Learning
# # 0. Import basic libraries and data sources, including the original dataset
import pandas as pd
import numpy as np
import h2o
h2o.init()
from h2o.automl import H2OAutoML
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
TRAIN_CSV = "/kaggle/input/playground-series-s3e15/data.csv"
TEST_CSV = "/kaggle/input/playground-series-s3e15/sample_submission.csv"
EXTERNAL_CSV = "/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv"
# # 1. Prepare the data
# ## 1.1. Read the data
# First we read the CSV files and put all the data into a single dataframe. Though we don't have access to the missing features, with this combination of the datasets we can possibly make better use of the relations between features.
# The `test` column indicates the dataset and the `gen` column indicates whether the data was generated by a deep learning model or not.
target = "x_e_out [-]"
df1 = pd.read_csv(TRAIN_CSV)
df1["test"] = pd.isna(df1[target])
df1["gen"] = 1
df2 = pd.read_csv(EXTERNAL_CSV)
df2["test"] = 0
df2["gen"] = 0
# df = df1 # Do not include the original dataset
df = pd.concat([df1, df2])
df.reset_index(inplace=True)
df.drop("index", axis=1, inplace=True)
df.head()
# We replace each categorical value with the mean of the target for that category (a simple target encoding).
columns = [
"author",
"geometry",
"pressure [MPa]",
"mass_flux [kg/m2-s]",
"D_e [mm]",
"D_h [mm]",
"length [mm]",
"chf_exp [MW/m2]",
]
cat_columns = ["author", "geometry"]
for col in cat_columns:
replace = df.groupby(by=col)[target].mean().to_dict()
df[col].replace(replace, inplace=True)
# # 2. Using H2O
def get_numpy_arrays(data):
X = data.drop(["id", "test", "gen", target], axis=1).to_numpy()
y = data[target].to_numpy()
scaler = StandardScaler()
X = scaler.fit_transform(X)
return X, y
X, y = get_numpy_arrays(df[df.test != 1])
X_submit, y_submit = get_numpy_arrays(df[df.test == 1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
df_train = pd.DataFrame(data=X_train, columns=columns)
df_train[target] = y_train
df_test = pd.DataFrame(data=X_test, columns=columns)
df_test[target] = y_test
data_h2o_train_val = h2o.H2OFrame(df_train)
test_frame = h2o.H2OFrame(df_test)
data_h2o_train_val[target] = data_h2o_train_val[target]
test_frame[target] = test_frame[target]
training_frame, validation_frame = data_h2o_train_val.split_frame(
ratios=[
0.7,
],
seed=42,
)
aml = H2OAutoML(
max_models=100,
seed=12,
exclude_algos=["DeepLearning"],
verbosity=None,
nfolds=10,
sort_metric="RMSE",
stopping_metric="RMSE",
)
# aml = H2OAutoML(max_models=10, seed=12, exclude_algos=[], verbosity=None, nfolds=5)
aml.train(
x=columns,
y=target,
training_frame=training_frame,
validation_frame=validation_frame,
)
# # 3. Predict & submit
df_submit = pd.DataFrame(data=X_submit, columns=columns)
data_h2o_submit = h2o.H2OFrame(df_submit)
prediction = aml.leader.predict(data_h2o_submit).as_data_frame()["predict"].to_numpy()
data_submit = df[df.test == 1].copy()
data_submit[target] = prediction
data_submit[["id", target]].to_csv("submission_h2o.csv", index=False)
| false | 0 | 1,250 | 1 | 1,366 | 1,250 |
||
129720583
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
full_data = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv")
sample = pd.read_csv("/kaggle/input/playground-series-s3e15/sample_submission.csv")
len(sample), len(full_data)
# Check if IDs are same
u1 = full_data[full_data["x_e_out [-]"].isna()]["id"].unique()
u2 = sample["id"].unique()
set(u1).union(set(u2)) == set(u1).intersection(set(u2))
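# Equivalently, the two ID sets can be compared directly:
set(u1) == set(u2)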
test = full_data[full_data["x_e_out [-]"].isna()]
data = full_data[~full_data["x_e_out [-]"].isna()]
data.info()
sample
sample["x_e_out [-]"] = data.mean()["x_e_out [-]"]
sample.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720583.ipynb
| null | null |
[{"Id": 129720583, "ScriptId": 38577091, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12420055, "CreationDate": "05/16/2023 02:46:48", "VersionNumber": 1.0, "Title": "notebook98729bbae7", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 43.0, "LinesInsertedFromPrevious": 43.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
full_data = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv")
sample = pd.read_csv("/kaggle/input/playground-series-s3e15/sample_submission.csv")
len(sample), len(full_data)
# Check if IDs are same
u1 = full_data[full_data["x_e_out [-]"].isna()]["id"].unique()
u2 = sample["id"].unique()
set(u1).union(set(u2)) == set(u1).intersection(set(u2))
test = full_data[full_data["x_e_out [-]"].isna()]
data = full_data[~full_data["x_e_out [-]"].isna()]
data.info()
sample
sample["x_e_out [-]"] = data.mean()["x_e_out [-]"]
sample.to_csv("submission.csv", index=False)
| false | 0 | 419 | 0 | 419 | 419 |
||
129720149
|
<jupyter_start><jupyter_text>Rice Dataset Commeo and Osmancik
DATASET: https://www.muratkoklu.com/datasets/
1: KOKLU, M., CINAR, I. and TASPINAR, Y. S. (2021). Classification of rice varieties with deep learning methods. Computers and Electronics in Agriculture, 187, 106285.
DOI: https://doi.org/10.1016/j.compag.2021.106285
2: CINAR, I. and KOKLU, M. (2021). Determination of Effective and Specific Physical Features of Rice Varieties by Computer Vision In Exterior Quality Inspection. Selcuk Journal of Agriculture and Food Sciences, 35(3), 229-243.
DOI: https://doi.org/10.15316/SJAFS.2021.252
3: CINAR, I. and KOKLU, M. (2022). Identification of Rice Varieties Using Machine Learning Algorithms. Journal of Agricultural Sciences, 28 (2), 307-325.
DOI: https://doi.org/10.15832/ankutbd.862482
4: CINAR, I. and KOKLU, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, 7(3), 188-194.
DOI: https://doi.org/10.18201/ijisae.2019355381
Relevant Papers / Citation Requests / Acknowledgements:
Cinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.
Data Set Name: Rice Dataset (Commeo and Osmancik)
Abstract: A total of 3810 rice grain's images were taken for the two species (Cammeo and Osmancik), processed and feature inferences were made. 7 morphological features were obtained for each grain of rice.
Source:
Ilkay CINAR
Graduate School of Natural and Applied Sciences,
Selcuk University, Konya, TURKEY
[email protected]
Murat KOKLU
Faculty of Technology,
Selcuk University, Konya, TURKEY.
[email protected]
DATASET: https://www.muratkoklu.com/datasets/
Relevant Information: In order to classify the rice varieties (Cammeo and Osmancik) used, preliminary processing was applied to the pictures obtained with computer vision system and a total of 3810 rice grains were obtained. Furthermore, 7 morphological features have been inferred for each grain. A data set has been created for the properties obtained.
Attribute Information:
1. Area: Returns the number of pixels within the boundaries of the rice grain.
2. Perimeter: Calculates the circumference by calculating the distance between pixels around the boundaries of the rice grain.
3. Major Axis Length: The longest line that can be drawn on the rice grain, i.e. the main axis distance, gives.
4. Minor Axis Length: The shortest line that can be drawn on the rice grain, i.e. the small axis distance, gives.
5. Eccentricity: It measures how round the ellipse, which has the same moments as the rice grain, is.
6. Convex Area: Returns the pixel count of the smallest convex shell of the region formed by the rice grain.
7. Extent: Returns the ratio of the region formed by the rice grain to the bounding box pixels
8. Class: Commeo and Osmancik.
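For illustration, features of this kind can be computed from a segmented grain image with standard region-property tools. The short sketch below is only indicative; it assumes scikit-image is available and that binary_grain is a (hypothetical, user-provided) binary mask containing a single rice grain:
from skimage.measure import label, regionprops
props = regionprops(label(binary_grain))[0]  # binary_grain: assumed binary mask of one grain
features = {
    "Area": props.area,
    "Perimeter": props.perimeter,
    "Major_Axis_Length": props.major_axis_length,
    "Minor_Axis_Length": props.minor_axis_length,
    "Eccentricity": props.eccentricity,
    "Convex_Area": props.convex_area,
    "Extent": props.extent,
}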
Relevant Papers / Citation Requests / Acknowledgements:
Cinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.
Kaggle dataset identifier: rice-dataset-commeo-and-osmancik
<jupyter_script>import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
data = pd.read_excel(
"/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx"
)
data
# **Feature selection**
X = data.loc[:, ["Major_Axis_Length", "Perimeter"]].values
# **Determining the optimal number of clusters:**
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=100500)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("Метод локтя")
plt.xlabel("Количество кластеров")
plt.ylabel("WCSS")
plt.show()
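# As an optional cross-check of the elbow plot, the silhouette score can be
# computed for each candidate k; higher values indicate better-separated clusters:
from sklearn.metrics import silhouette_score
for k in range(2, 11):
    labels = KMeans(n_clusters=k, init="k-means++", random_state=100500).fit_predict(X)
    print(k, silhouette_score(X, labels))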
# The optimal number of clusters is 5
# Training the model with the optimal number of clusters
kmeans = KMeans(n_clusters=5, init="k-means++", random_state=100500)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3")
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], c="purple", label="Cluster 4")
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], c="orange", label="Cluster 5")
plt.scatter(
kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
c="yellow",
label="Centroids",
)
plt.title("Кластеры")
plt.xlabel("MajorAxisLength")
plt.ylabel("Perimeter")
plt.legend()
plt.show()
# Visualization with a different number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=100500)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3")
plt.scatter(
kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
c="yellow",
label="Centroids",
)
plt.title("Кластеры")
plt.xlabel("MajorAxisLength")
plt.ylabel("Perimeter")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720149.ipynb
|
rice-dataset-commeo-and-osmancik
|
muratkokludataset
|
[{"Id": 129720149, "ScriptId": 37975997, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14569588, "CreationDate": "05/16/2023 02:40:34", "VersionNumber": 3.0, "Title": "metriclab", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 13.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186059002, "KernelVersionId": 129720149, "SourceDatasetVersionId": 3398955}]
|
[{"Id": 3398955, "DatasetId": 2048923, "DatasourceVersionId": 3450628, "CreatorUserId": 10072866, "LicenseName": "CC0: Public Domain", "CreationDate": "04/03/2022 00:40:03", "VersionNumber": 1.0, "Title": "Rice Dataset Commeo and Osmancik", "Slug": "rice-dataset-commeo-and-osmancik", "Subtitle": "Rice Dataset: 2 Class Commeo and Osmancik Rice", "Description": "DATASET: https://www.muratkoklu.com/datasets/\n\n1: KOKLU, M., CINAR, I. and TASPINAR, Y. S. (2021). Classification of rice varieties with deep learning methods. Computers and Electronics in Agriculture, 187, 106285.\nDOI: https://doi.org/10.1016/j.compag.2021.106285\n\n2: CINAR, I. and KOKLU, M. (2021). Determination of Effective and Specific Physical Features of Rice Varieties by Computer Vision In Exterior Quality Inspection. Selcuk Journal of Agriculture and Food Sciences, 35(3), 229-243.\nDOI: https://doi.org/10.15316/SJAFS.2021.252\n\n3: CINAR, I. and KOKLU, M. (2022). Identification of Rice Varieties Using Machine Learning Algorithms. Journal of Agricultural Sciences, 28 (2), 307-325.\nDOI: https://doi.org/10.15832/ankutbd.862482\n\n4: CINAR, I. and KOKLU, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, 7(3), 188-194.\nDOI: https://doi.org/10.18201/ijisae.2019355381\n\nRelevant Papers / Citation Requests / Acknowledgements:\nCinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.\n\nData Set Name: Rice Dataset (Commeo and Osmancik)\nAbstract: A total of 3810 rice grain's images were taken for the two species (Cammeo and Osmancik), processed and feature inferences were made. 7 morphological features were obtained for each grain of rice.\t\n\nSource:\nIlkay CINAR\nGraduate School of Natural and Applied Sciences, \nSelcuk University, Konya, TURKEY\[email protected]\n\nMurat KOKLU\nFaculty of Technology, \nSelcuk University, Konya, TURKEY.\[email protected]\n\nDATASET: https://www.muratkoklu.com/datasets/\n\nRelevant Information: In order to classify the rice varieties (Cammeo and Osmancik) used, preliminary processing was applied to the pictures obtained with computer vision system and a total of 3810 rice grains were obtained. Furthermore, 7 morphological features have been inferred for each grain. A data set has been created for the properties obtained.\n\nAttribute Information:\n1. Area: Returns the number of pixels within the boundaries of the rice grain.\n2. Perimeter: Calculates the circumference by calculating the distance between pixels around the boundaries of the rice grain.\n3. Major Axis Length: The longest line that can be drawn on the rice grain, i.e. the main axis distance, gives.\n4. Minor Axis Length: The shortest line that can be drawn on the rice grain, i.e. the small axis distance, gives.\n5. Eccentricity: It measures how round the ellipse, which has the same moments as the rice grain, is.\n6. Convex Area: Returns the pixel count of the smallest convex shell of the region formed by the rice grain.\n7. Extent: Returns the ratio of the region formed by the rice grain to the bounding box pixels\n8. Class: Commeo and Osmancik.\n\nRelevant Papers / Citation Requests / Acknowledgements:\nCinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. 
International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2048923, "CreatorUserId": 10072866, "OwnerUserId": 10072866.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3398955.0, "CurrentDatasourceVersionId": 3450628.0, "ForumId": 2073935, "Type": 2, "CreationDate": "04/03/2022 00:40:03", "LastActivityDate": "04/03/2022", "TotalViews": 6863, "TotalDownloads": 810, "TotalVotes": 1417, "TotalKernels": 2}]
|
[{"Id": 10072866, "UserName": "muratkokludataset", "DisplayName": "Murat KOKLU", "RegisterDate": "03/28/2022", "PerformanceTier": 2}]
|
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
data = pd.read_excel(
"/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx"
)
data
# **Feature selection**
X = data.loc[:, ["Major_Axis_Length", "Perimeter"]].values
# **Determining the optimal number of clusters:**
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init="k-means++", random_state=100500)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("Метод локтя")
plt.xlabel("Количество кластеров")
plt.ylabel("WCSS")
plt.show()
# The optimal number of clusters is 5
# Training the model with the optimal number of clusters
kmeans = KMeans(n_clusters=5, init="k-means++", random_state=100500)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3")
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], c="purple", label="Cluster 4")
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], c="orange", label="Cluster 5")
plt.scatter(
kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
c="yellow",
label="Centroids",
)
plt.title("Кластеры")
plt.xlabel("MajorAxisLength")
plt.ylabel("Perimeter")
plt.legend()
plt.show()
# Visualization with a different number of clusters
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=100500)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3")
plt.scatter(
kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
c="yellow",
label="Centroids",
)
plt.title("Кластеры")
plt.xlabel("MajorAxisLength")
plt.ylabel("Perimeter")
plt.legend()
plt.show()
| false | 0 | 964 | 0 | 2,114 | 964 |
||
129720447
|
<jupyter_start><jupyter_text>Diamonds
### Context
This classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization.
### Content
**price** price in US dollars (\$326--\$18,823)
**carat** weight of the diamond (0.2--5.01)
**cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal)
**color** diamond colour, from J (worst) to D (best)
**clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))
**x** length in mm (0--10.74)
**y** width in mm (0--58.9)
**z** depth in mm (0--31.8)
**depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79)
**table** width of top of diamond relative to widest point (43--95)
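As a quick illustration of this relation (using the dataframe df loaded in the code below), the depth column can be re-derived from x, y and z; the values agree up to the rounding of the stored dimensions:
depth_check = 2 * df["z"] / (df["x"] + df["y"]) * 100  # df: the dataframe loaded in the snippet below
df[["depth"]].assign(depth_check=depth_check).head()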
Kaggle dataset identifier: diamonds
<jupyter_code>import pandas as pd
df = pd.read_csv('diamonds/diamonds.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 53940 entries, 0 to 53939
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 53940 non-null int64
1 carat 53940 non-null float64
2 cut 53940 non-null object
3 color 53940 non-null object
4 clarity 53940 non-null object
5 depth 53940 non-null float64
6 table 53940 non-null float64
7 price 53940 non-null int64
8 x 53940 non-null float64
9 y 53940 non-null float64
10 z 53940 non-null float64
dtypes: float64(6), int64(2), object(3)
memory usage: 4.5+ MB
<jupyter_text>Examples:
{
"Unnamed: 0": 1,
"carat": 0.23,
"cut": "Ideal",
"color": "E",
"clarity": "SI2",
"depth": 61.5,
"table": 55,
"price": 326,
"x": 3.95,
"y": 3.98,
"z": 2.43
}
{
"Unnamed: 0": 2,
"carat": 0.21,
"cut": "Premium",
"color": "E",
"clarity": "SI1",
"depth": 59.8,
"table": 61,
"price": 326,
"x": 3.89,
"y": 3.84,
"z": 2.31
}
{
"Unnamed: 0": 3,
"carat": 0.23,
"cut": "Good",
"color": "E",
"clarity": "VS1",
"depth": 56.9,
"table": 65,
"price": 327,
"x": 4.05,
"y": 4.07,
"z": 2.31
}
{
"Unnamed: 0": 4,
"carat": 0.29,
"cut": "Premium",
"color": "I",
"clarity": "VS2",
"depth": 62.4,
"table": 58,
"price": 334,
"x": 4.2,
"y": 4.23,
"z": 2.63
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load Diamonds Dataset
df = pd.read_csv("/kaggle/input/diamonds/diamonds.csv")
df.head(20)
# Analyze dataset
df.describe()
# Example 1: Get the summary statistics of a numerical column
summary_stats = df["price"].describe()
summary_stats
# Example 2: Count the occurrences of unique values in a categorical column
value_counts = df["cut"].value_counts()
value_counts
# ## Insights from the "diamonds" dataset
# In the diamonds dataset, the most expensive diamond costs 18,823 USD, whereas the cheapest costs about 326 USD, and the mean price is roughly 3,932.80 USD.
# For carat, the heaviest diamond weighs 5.01 carats, the lightest 0.20 carats, and the mean is about 0.80 carats.
# Actually, there are five categories of cut: Fair, Good, Very Good, Premium and Ideal.
# - The distribution of diamond cuts is as follows:
# - Fair: Count 1610
# - Good: Count 4906
# - Very Good: Count 12082
# - Premium : Count 13791
# - Ideal : Count 21551
#
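# A quick supplementary check of the figures above: break the price summary
# down per cut.
df.groupby("cut")["price"].agg(["count", "mean", "min", "max"])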
import seaborn as sns
import matplotlib.pyplot as plt
# Example: Bar plot of cut counts
plt.figure(figsize=(8, 6))
value_counts.plot(kind="bar")
plt.title("Diamond Cut Counts")
plt.xlabel("Cut")
plt.ylabel("Count")
plt.show()
sns.boxplot(x="cut", y="price", data=df)
plt.title("Diamond Prices by Cut")
plt.xlabel("Cut")
plt.ylabel("Price")
plt.show()
sns.scatterplot(x="carat", y="price", hue="clarity", size="depth", data=df)
plt.title("Carat vs. Price (Colored by Clarity, Sized by Depth)")
plt.xlabel("Carat")
plt.ylabel("Price")
plt.show()
# Load Diabetes Dataset
df = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv")
df
# Analyze dataset
df.describe()
# Get the summary statistics of the numerical columns
summary_stats = df.describe()
summary_stats
# Count the number of instances with and without diabetes
diabetes_counts = df["Outcome"].value_counts()
diabetes_counts
# The dataset contains information about various factors such as glucose levels, blood pressure, BMI, and age of Pima Indian women, along with an outcome variable indicating whether or not they have diabetes.
# - The summary statistics of the numerical columns are as follows:
# - Glucose: Mean = 120.894531, Standard Deviation = 31.972618, Minimum = 0.000000, Maximum = 199.000000
# - BloodPressure: Mean = 69.105469, Standard Deviation = 19.355807, Minimum = 0.000000, Maximum = 122.000000
# - BMI: Mean = 31.992578, Standard Deviation = 7.884160, Minimum = 0.000000, Maximum = 67.100000
# - Age: Mean = 33.240885, Standard Deviation = 11.760232, Minimum = 21.000000, Maximum = 81.000000.
# - The counts of diabetes outcome are as follows:
# - No Diabetes: 500 instances
# - Diabetes: 268 instances
# - From the count plot, we can see that the dataset is slightly imbalanced, with a higher number of instances without diabetes compared to those with diabetes.
# - In the scatter plot of glucose levels versus BMI, we observe that there seems to be a higher concentration of diabetes cases in the higher glucose and higher BMI region, while the non-diabetes cases are spread across the range of glucose and BMI values.
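# A quick supplementary check of the observation above: compare the average
# glucose and BMI of the two outcome groups directly.
df.groupby("Outcome")[["Glucose", "BMI"]].mean()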
sns.lineplot(x="Age", y="Glucose", hue="Outcome", data=df)
plt.title("Age vs. Glucose Levels (Colored by Diabetes Outcome)")
plt.xlabel("Age")
plt.ylabel("Glucose")
plt.show()
sns.scatterplot(x="Glucose", y="BMI", hue="Outcome", data=df)
plt.title("Glucose Levels vs. BMI (Colored by Diabetes Outcome)")
plt.xlabel("Glucose")
plt.ylabel("BMI")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720447.ipynb
|
diamonds
|
shivam2503
|
[{"Id": 129720447, "ScriptId": 38575075, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8004461, "CreationDate": "05/16/2023 02:45:04", "VersionNumber": 1.0, "Title": "Data Visualization", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 120.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186059550, "KernelVersionId": 129720447, "SourceDatasetVersionId": 2368}, {"Id": 186059549, "KernelVersionId": 129720447, "SourceDatasetVersionId": 482}]
|
[{"Id": 2368, "DatasetId": 1312, "DatasourceVersionId": 2368, "CreatorUserId": 945829, "LicenseName": "Unknown", "CreationDate": "05/25/2017 03:06:57", "VersionNumber": 1.0, "Title": "Diamonds", "Slug": "diamonds", "Subtitle": "Analyze diamonds by their cut, color, clarity, price, and other attributes", "Description": "### Context \n\nThis classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization.\n\n### Content\n\n**price** price in US dollars (\\$326--\\$18,823)\n\n**carat** weight of the diamond (0.2--5.01)\n\n**cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal)\n\n**color** diamond colour, from J (worst) to D (best)\n\n**clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))\n\n**x** length in mm (0--10.74)\n\n**y** width in mm (0--58.9)\n\n**z** depth in mm (0--31.8)\n\n**depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79)\n\n**table** width of top of diamond relative to widest point (43--95)", "VersionNotes": "Initial release", "TotalCompressedBytes": 3192560.0, "TotalUncompressedBytes": 3192560.0}]
|
[{"Id": 1312, "CreatorUserId": 945829, "OwnerUserId": 945829.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2368.0, "CurrentDatasourceVersionId": 2368.0, "ForumId": 3701, "Type": 2, "CreationDate": "05/25/2017 03:06:57", "LastActivityDate": "02/06/2018", "TotalViews": 434479, "TotalDownloads": 74575, "TotalVotes": 952, "TotalKernels": 444}]
|
[{"Id": 945829, "UserName": "shivam2503", "DisplayName": "Shivam Agrawal", "RegisterDate": "03/07/2017", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load Diamonds Dataset
df = pd.read_csv("/kaggle/input/diamonds/diamonds.csv")
df.head(20)
# Analyze dataset
df.describe()
# Example 1: Get the summary statistics of a numerical column
summary_stats = df["price"].describe()
summary_stats
# Example 2: Count the occurrences of unique values in a categorical column
value_counts = df["cut"].value_counts()
value_counts
# ## Insights from the "diamonds" dataset
# In the diamonds dataset, the most expensive diamond costs 18,823 USD, whereas the cheapest costs about 326 USD, and the mean price is roughly 3,932.80 USD.
# For carat, the heaviest diamond weighs 5.01 carats, the lightest 0.20 carats, and the mean is about 0.80 carats.
# Actually, there are five categories of cut: Fair, Good, Very Good, Premium and Ideal.
# - The distribution of diamond cuts is as follows:
# - Fair: Count 1610
# - Good: Count 4906
# - Very Good: Count 12082
# - Premium : Count 13791
# - Ideal : Count 21551
#
import seaborn as sns
import matplotlib.pyplot as plt
# Example: Bar plot of cut counts
plt.figure(figsize=(8, 6))
value_counts.plot(kind="bar")
plt.title("Diamond Cut Counts")
plt.xlabel("Cut")
plt.ylabel("Count")
plt.show()
sns.boxplot(x="cut", y="price", data=df)
plt.title("Diamond Prices by Cut")
plt.xlabel("Cut")
plt.ylabel("Price")
plt.show()
sns.scatterplot(x="carat", y="price", hue="clarity", size="depth", data=df)
plt.title("Carat vs. Price (Colored by Clarity, Sized by Depth)")
plt.xlabel("Carat")
plt.ylabel("Price")
plt.show()
# Load Diabetes Dataset
df = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv")
df
# Analyze dataset
df.describe()
# Get the summary statistics of the numerical columns
summary_stats = df.describe()
summary_stats
# Count the number of instances with and without diabetes
diabetes_counts = df["Outcome"].value_counts()
diabetes_counts
# The dataset contains information about various factors such as glucose levels, blood pressure, BMI, and age of Pima Indian women, along with an outcome variable indicating whether or not they have diabetes.
# - The summary statistics of the numerical columns are as follows:
# - Glucose: Mean = 120.894531, Standard Deviation = 31.972618, Minimum = 0.000000, Maximum = 199.000000
# - BloodPressure: Mean = 69.105469, Standard Deviation = 19.355807, Minimum = 0.000000, Maximum = 122.000000
# - BMI: Mean = 31.992578, Standard Deviation = 7.884160, Minimum = 0.000000, Maximum = 67.100000
# - Age: Mean = 33.240885, Standard Deviation = 11.760232, Minimum = 21.000000, Maximum = 81.000000.
# - The counts of diabetes outcome are as follows:
# - No Diabetes: 500 instances
# - Diabetes: 268 instances
# - From the count plot, we can see that the dataset is slightly imbalanced, with a higher number of instances without diabetes compared to those with diabetes.
# - In the scatter plot of glucose levels versus BMI, we observe that there seems to be a higher concentration of diabetes cases in the higher glucose and higher BMI region, while the non-diabetes cases are spread across the range of glucose and BMI values.
sns.lineplot(x="Age", y="Glucose", hue="Outcome", data=df)
plt.title("Age vs. Glucose Levels (Colored by Diabetes Outcome)")
plt.xlabel("Age")
plt.ylabel("Glucose")
plt.show()
sns.scatterplot(x="Glucose", y="BMI", hue="Outcome", data=df)
plt.title("Glucose Levels vs. BMI (Colored by Diabetes Outcome)")
plt.xlabel("Glucose")
plt.ylabel("BMI")
plt.show()
|
[{"diamonds/diamonds.csv": {"column_names": "[\"Unnamed: 0\", \"carat\", \"cut\", \"color\", \"clarity\", \"depth\", \"table\", \"price\", \"x\", \"y\", \"z\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"carat\": \"float64\", \"cut\": \"object\", \"color\": \"object\", \"clarity\": \"object\", \"depth\": \"float64\", \"table\": \"float64\", \"price\": \"int64\", \"x\": \"float64\", \"y\": \"float64\", \"z\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 53940 entries, 0 to 53939\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 53940 non-null int64 \n 1 carat 53940 non-null float64\n 2 cut 53940 non-null object \n 3 color 53940 non-null object \n 4 clarity 53940 non-null object \n 5 depth 53940 non-null float64\n 6 table 53940 non-null float64\n 7 price 53940 non-null int64 \n 8 x 53940 non-null float64\n 9 y 53940 non-null float64\n 10 z 53940 non-null float64\ndtypes: float64(6), int64(2), object(3)\nmemory usage: 4.5+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 53940.0, \"mean\": 26970.5, \"std\": 15571.281096942537, \"min\": 1.0, \"25%\": 13485.75, \"50%\": 26970.5, \"75%\": 40455.25, \"max\": 53940.0}, \"carat\": {\"count\": 53940.0, \"mean\": 0.7979397478680014, \"std\": 0.4740112444054184, \"min\": 0.2, \"25%\": 0.4, \"50%\": 0.7, \"75%\": 1.04, \"max\": 5.01}, \"depth\": {\"count\": 53940.0, \"mean\": 61.749404894327036, \"std\": 1.432621318833661, \"min\": 43.0, \"25%\": 61.0, \"50%\": 61.8, \"75%\": 62.5, \"max\": 79.0}, \"table\": {\"count\": 53940.0, \"mean\": 57.45718390804598, \"std\": 2.2344905628213225, \"min\": 43.0, \"25%\": 56.0, \"50%\": 57.0, \"75%\": 59.0, \"max\": 95.0}, \"price\": {\"count\": 53940.0, \"mean\": 3932.799721913237, \"std\": 3989.439738146379, \"min\": 326.0, \"25%\": 950.0, \"50%\": 2401.0, \"75%\": 5324.25, \"max\": 18823.0}, \"x\": {\"count\": 53940.0, \"mean\": 5.731157211716722, \"std\": 1.1217607467924928, \"min\": 0.0, \"25%\": 4.71, \"50%\": 5.7, \"75%\": 6.54, \"max\": 10.74}, \"y\": {\"count\": 53940.0, \"mean\": 5.734525954764553, \"std\": 1.1421346741235552, \"min\": 0.0, \"25%\": 4.72, \"50%\": 5.71, \"75%\": 6.54, \"max\": 58.9}, \"z\": {\"count\": 53940.0, \"mean\": 3.5387337782721544, \"std\": 0.7056988469499941, \"min\": 0.0, \"25%\": 2.91, \"50%\": 3.53, \"75%\": 4.04, \"max\": 31.8}}", "examples": "{\"Unnamed: 0\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"carat\":{\"0\":0.23,\"1\":0.21,\"2\":0.23,\"3\":0.29},\"cut\":{\"0\":\"Ideal\",\"1\":\"Premium\",\"2\":\"Good\",\"3\":\"Premium\"},\"color\":{\"0\":\"E\",\"1\":\"E\",\"2\":\"E\",\"3\":\"I\"},\"clarity\":{\"0\":\"SI2\",\"1\":\"SI1\",\"2\":\"VS1\",\"3\":\"VS2\"},\"depth\":{\"0\":61.5,\"1\":59.8,\"2\":56.9,\"3\":62.4},\"table\":{\"0\":55.0,\"1\":61.0,\"2\":65.0,\"3\":58.0},\"price\":{\"0\":326,\"1\":326,\"2\":327,\"3\":334},\"x\":{\"0\":3.95,\"1\":3.89,\"2\":4.05,\"3\":4.2},\"y\":{\"0\":3.98,\"1\":3.84,\"2\":4.07,\"3\":4.23},\"z\":{\"0\":2.43,\"1\":2.31,\"2\":2.31,\"3\":2.63}}"}}]
| true | 2 |
<start_data_description><data_path>diamonds/diamonds.csv:
<column_names>
['Unnamed: 0', 'carat', 'cut', 'color', 'clarity', 'depth', 'table', 'price', 'x', 'y', 'z']
<column_types>
{'Unnamed: 0': 'int64', 'carat': 'float64', 'cut': 'object', 'color': 'object', 'clarity': 'object', 'depth': 'float64', 'table': 'float64', 'price': 'int64', 'x': 'float64', 'y': 'float64', 'z': 'float64'}
<dataframe_Summary>
{'Unnamed: 0': {'count': 53940.0, 'mean': 26970.5, 'std': 15571.281096942537, 'min': 1.0, '25%': 13485.75, '50%': 26970.5, '75%': 40455.25, 'max': 53940.0}, 'carat': {'count': 53940.0, 'mean': 0.7979397478680014, 'std': 0.4740112444054184, 'min': 0.2, '25%': 0.4, '50%': 0.7, '75%': 1.04, 'max': 5.01}, 'depth': {'count': 53940.0, 'mean': 61.749404894327036, 'std': 1.432621318833661, 'min': 43.0, '25%': 61.0, '50%': 61.8, '75%': 62.5, 'max': 79.0}, 'table': {'count': 53940.0, 'mean': 57.45718390804598, 'std': 2.2344905628213225, 'min': 43.0, '25%': 56.0, '50%': 57.0, '75%': 59.0, 'max': 95.0}, 'price': {'count': 53940.0, 'mean': 3932.799721913237, 'std': 3989.439738146379, 'min': 326.0, '25%': 950.0, '50%': 2401.0, '75%': 5324.25, 'max': 18823.0}, 'x': {'count': 53940.0, 'mean': 5.731157211716722, 'std': 1.1217607467924928, 'min': 0.0, '25%': 4.71, '50%': 5.7, '75%': 6.54, 'max': 10.74}, 'y': {'count': 53940.0, 'mean': 5.734525954764553, 'std': 1.1421346741235552, 'min': 0.0, '25%': 4.72, '50%': 5.71, '75%': 6.54, 'max': 58.9}, 'z': {'count': 53940.0, 'mean': 3.5387337782721544, 'std': 0.7056988469499941, 'min': 0.0, '25%': 2.91, '50%': 3.53, '75%': 4.04, 'max': 31.8}}
<dataframe_info>
RangeIndex: 53940 entries, 0 to 53939
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 53940 non-null int64
1 carat 53940 non-null float64
2 cut 53940 non-null object
3 color 53940 non-null object
4 clarity 53940 non-null object
5 depth 53940 non-null float64
6 table 53940 non-null float64
7 price 53940 non-null int64
8 x 53940 non-null float64
9 y 53940 non-null float64
10 z 53940 non-null float64
dtypes: float64(6), int64(2), object(3)
memory usage: 4.5+ MB
<some_examples>
{'Unnamed: 0': {'0': 1, '1': 2, '2': 3, '3': 4}, 'carat': {'0': 0.23, '1': 0.21, '2': 0.23, '3': 0.29}, 'cut': {'0': 'Ideal', '1': 'Premium', '2': 'Good', '3': 'Premium'}, 'color': {'0': 'E', '1': 'E', '2': 'E', '3': 'I'}, 'clarity': {'0': 'SI2', '1': 'SI1', '2': 'VS1', '3': 'VS2'}, 'depth': {'0': 61.5, '1': 59.8, '2': 56.9, '3': 62.4}, 'table': {'0': 55.0, '1': 61.0, '2': 65.0, '3': 58.0}, 'price': {'0': 326, '1': 326, '2': 327, '3': 334}, 'x': {'0': 3.95, '1': 3.89, '2': 4.05, '3': 4.2}, 'y': {'0': 3.98, '1': 3.84, '2': 4.07, '3': 4.23}, 'z': {'0': 2.43, '1': 2.31, '2': 2.31, '3': 2.63}}
<end_description>
| 1,436 | 0 | 2,480 | 1,436 |
129757782
|
<jupyter_start><jupyter_text>IMDB movies dataset
The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies.
Kaggle dataset identifier: imdb-movies-dataset
<jupyter_script>import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName("Movie").getOrCreate()
df = spark.read.csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv", header=True)
df.show(5)
df = df.withColumn("Profit", col("revenue") - col("budget_x"))
df = df.withColumn("Achievement", when(col("score") < 50, "Average").otherwise("Good"))
df.createOrReplaceTempView("movie_released")
var = spark.sql("select * from movie_released")
var.show(5)
df.select("names", "status", "date_x", "score", "Profit", "Achievement").show(20)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/757/129757782.ipynb
|
imdb-movies-dataset
|
ashpalsingh1525
|
[{"Id": 129757782, "ScriptId": 38433248, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8866968, "CreationDate": "05/16/2023 09:04:08", "VersionNumber": 1.0, "Title": "movies", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 26.0, "LinesInsertedFromPrevious": 26.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186113879, "KernelVersionId": 129757782, "SourceDatasetVersionId": 5552662}]
|
[{"Id": 5552662, "DatasetId": 3198793, "DatasourceVersionId": 5627422, "CreatorUserId": 13490345, "LicenseName": "Community Data License Agreement - Permissive - Version 1.0", "CreationDate": "04/28/2023 23:18:15", "VersionNumber": 1.0, "Title": "IMDB movies dataset", "Slug": "imdb-movies-dataset", "Subtitle": "Explore 10000+ movies worldwide with the IMDB Movies dataset", "Description": "The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3198793, "CreatorUserId": 13490345, "OwnerUserId": 13490345.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5552662.0, "CurrentDatasourceVersionId": 5627422.0, "ForumId": 3263430, "Type": 2, "CreationDate": "04/28/2023 23:18:15", "LastActivityDate": "04/28/2023", "TotalViews": 19297, "TotalDownloads": 3999, "TotalVotes": 79, "TotalKernels": 10}]
|
[{"Id": 13490345, "UserName": "ashpalsingh1525", "DisplayName": "Ashpal Singh1525", "RegisterDate": "01/31/2023", "PerformanceTier": 2}]
|
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName("Movie").getOrCreate()
df = spark.read.csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv", header=True)
df.show(5)
df = df.withColumn("Profit", col("revenue") - col("budget_x"))
df = df.withColumn("Achievement", when(col("score") < 50, "Average").otherwise("Good"))
df.createOrReplaceTempView("movie_released")
var = spark.sql("select * from movie_released")
var.show(5)
df.select("names", "status", "date_x", "score", "Profit", "Achievement").show(20)
| false | 0 | 191 | 0 | 307 | 191 |
||
129389124
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sample_submission_df = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
sample_submission_df.head()
categorical_columns = train_df.select_dtypes(include=["object"]).columns
print(categorical_columns)
test_df.head(50)
import pandas as pd
from scipy.stats import skew
import seaborn as sns
import matplotlib.pyplot as plt
def get_skewed_columns(df):
skewed_columns = []
for column in df.columns:
if df[column].dtype != "object": # Consider numerical columns only
            skewness = skew(df[column].dropna())  # drop NaNs so columns with missing values are not silently skipped
if skewness > 1 or skewness < -1: # Define a threshold for skewness
skewed_columns.append(column)
print("Columns with skewed distribution:")
print(skewed_columns)
get_skewed_columns(train_df)
X = train_df.copy()
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
X["EJ"] = label_encoder.fit_transform(X["EJ"])
y_train = X.pop("Class")
X_train = X.loc[:, X.columns != "Id"]
X_test = test_df.loc[:, test_df.columns != "Id"]
X_test["EJ"] = label_encoder.fit_transform(X_test["EJ"])
X_test.info()
import lightgbm as lgb
import numpy as np
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
# Assuming X_train, y_train, X_test are your training features, training labels, and test features respectively
# Set parameters for LightGBM model
params = {
"objective": "binary",
"metric": "binary_logloss",
"verbosity": -1,
"n_estimators": 1000,
"learning_rate": 0.1,
"max_depth": 6,
"subsample": 0.8,
"colsample_bytree": 0.8,
"lambda": 1.0,
}
# Initialize arrays to store validation scores and test predictions
validation_scores = []
test_prediction = np.zeros((len(X_test)))
# Define the number of folds for cross-validation
n_folds = 5
# Create the cross-validation strategy
skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42)
# Iterate over each fold
for fold, (train_index, val_index) in enumerate(skf.split(X_train, y_train)):
print(f"Training on Fold {fold + 1}")
# Get the training and validation sets for this fold
X_train_fold, X_val_fold = X_train.iloc[train_index], X_train.iloc[val_index]
y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index]
# Create LightGBM datasets for training and validation
train_data = lgb.Dataset(X_train_fold, label=y_train_fold)
val_data = lgb.Dataset(X_val_fold, label=y_val_fold)
# Train the LightGBM model
model = lgb.train(
params,
train_data,
valid_sets=[train_data, val_data],
early_stopping_rounds=100,
verbose_eval=100,
)
# Make predictions on the validation set
val_pred = model.predict(X_val_fold)
fold_logloss = log_loss(y_val_fold, val_pred)
validation_scores.append(fold_logloss)
# Make predictions on the test set for this fold
# test_pred = model.predict(X_test)
test_prediction += model.predict(X_test, num_iteration=model.best_iteration)
print(f"Fold {fold + 1} log loss: {fold_logloss}")
# Calculate the mean and standard deviation of validation scores
mean_validation_score = np.mean(validation_scores)
std_validation_score = np.std(validation_scores)
print(f"\nMean validation log loss: {mean_validation_score}")
print(f"Standard deviation of validation log loss: {std_validation_score}")
# Average the predictions across all folds for the final test predictions
test_prediction /= n_folds
print(test_prediction)
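# Optional: a rough look at which features the booster relied on most.
# Note this uses only the last fold's model, so it is indicative rather than definitive.
importance = sorted(
    zip(model.feature_name(), model.feature_importance(importance_type="gain")),
    key=lambda t: t[1],
    reverse=True,
)
print(importance[:10])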
sample_submission = sample_submission_df.copy()
sample_submission["class_1"] = test_prediction
sample_submission["class_0"] = 1 - test_prediction
sample_submission.to_csv(r"submission.csv", index=False)
sample_submission.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389124.ipynb
| null | null |
[{"Id": 129389124, "ScriptId": 38470513, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1870127, "CreationDate": "05/13/2023 11:23:19", "VersionNumber": 1.0, "Title": "notebooke963155877", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 149.0, "LinesInsertedFromPrevious": 149.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sample_submission_df = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
sample_submission_df.head()
categorical_columns = train_df.select_dtypes(include=["object"]).columns
print(categorical_columns)
test_df.head(50)
import pandas as pd
from scipy.stats import skew
import seaborn as sns
import matplotlib.pyplot as plt
def get_skewed_columns(df):
skewed_columns = []
for column in df.columns:
if df[column].dtype != "object": # Consider numerical columns only
skewness = skew(df[column])
if skewness > 1 or skewness < -1: # Define a threshold for skewness
skewed_columns.append(column)
print("Columns with skewed distribution:")
print(skewed_columns)
get_skewed_columns(train_df)
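# A minimal sketch (not used in the pipeline below) of how the detected skew
# could be reduced with a log1p transform. Tree models such as the LightGBM
# model used later are largely insensitive to monotonic transforms, so this
# step is optional; only non-negative numeric columns are transformed.
def log1p_skewed_columns(df, threshold=1.0):
    out = df.copy()
    for column in out.columns:
        if out[column].dtype != "object":
            values = out[column].dropna()
            if len(values) > 2 and (values >= 0).all() and abs(skew(values)) > threshold:
                out[column] = np.log1p(out[column])
    return out
# Example usage (commented out so the original workflow is unchanged):
# train_df_unskewed = log1p_skewed_columns(train_df)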
X = train_df.copy()
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
X["EJ"] = label_encoder.fit_transform(X["EJ"])
y_train = X.pop("Class")
X_train = X.loc[:, X.columns != "Id"]
X_test = test_df.loc[:, test_df.columns != "Id"]
X_test["EJ"] = label_encoder.fit_transform(X_test["EJ"])
X_test.info()
import lightgbm as lgb
import numpy as np
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
# Assuming X_train, y_train, X_test are your training features, training labels, and test features respectively
# Set parameters for LightGBM model
params = {
"objective": "binary",
"metric": "binary_logloss",
"verbosity": -1,
"n_estimators": 1000,
"learning_rate": 0.1,
"max_depth": 6,
"subsample": 0.8,
"colsample_bytree": 0.8,
"lambda": 1.0,
}
# Initialize arrays to store validation scores and test predictions
validation_scores = []
test_prediction = np.zeros((len(X_test)))
# Define the number of folds for cross-validation
n_folds = 5
# Create the cross-validation strategy
skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42)
# Iterate over each fold
for fold, (train_index, val_index) in enumerate(skf.split(X_train, y_train)):
print(f"Training on Fold {fold + 1}")
# Get the training and validation sets for this fold
X_train_fold, X_val_fold = X_train.iloc[train_index], X_train.iloc[val_index]
y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index]
# Create LightGBM datasets for training and validation
train_data = lgb.Dataset(X_train_fold, label=y_train_fold)
val_data = lgb.Dataset(X_val_fold, label=y_val_fold)
# Train the LightGBM model
model = lgb.train(
params,
train_data,
valid_sets=[train_data, val_data],
early_stopping_rounds=100,
verbose_eval=100,
)
# Make predictions on the validation set
val_pred = model.predict(X_val_fold)
fold_logloss = log_loss(y_val_fold, val_pred)
validation_scores.append(fold_logloss)
# Make predictions on the test set for this fold
# test_pred = model.predict(X_test)
test_prediction += model.predict(X_test, num_iteration=model.best_iteration)
print(f"Fold {fold + 1} log loss: {fold_logloss}")
# Calculate the mean and standard deviation of validation scores
mean_validation_score = np.mean(validation_scores)
std_validation_score = np.std(validation_scores)
print(f"\nMean validation log loss: {mean_validation_score}")
print(f"Standard deviation of validation log loss: {std_validation_score}")
# Average the predictions across all folds for the final test predictions
test_prediction /= n_folds
print(test_prediction)
sample_submission = sample_submission_df.copy()
sample_submission["class_1"] = test_prediction
sample_submission["class_0"] = 1 - test_prediction
sample_submission.to_csv(r"submission.csv", index=False)
sample_submission.head()
| false | 0 | 1,418 | 2 | 1,418 | 1,418 |
||
129389452
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ## Load Preprocessed data
tr = pd.read_csv("/kaggle/input/dummy-hp/train_proc.csv")
ts = pd.read_csv("/kaggle/input/dummy-hp/test_proc.csv")
# ## Frequency subsampling
# For recurrent and autoregressive networks to work, the series needs a constant sampling frequency. That is not the case here: some timestamps are missing, so I simply fill those gaps by linear interpolation
num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
def clean_freq(timestamp, series):
# Detect wrong frequencies
diffs = np.diff(timestamp.astype(np.int64))
median = np.median(diffs)
bad = np.where(diffs != median)[0]
if len(bad) == 0:
return timestamp, series
# Add missing
for b in bad:
ratio = int(diffs[b] / median)
assert ratio * median == diffs[b], "Not integer frequency"
timestamp = np.hstack(
(timestamp, np.linspace(timestamp[b], timestamp[b + 1], ratio + 1)[1:-1])
)
series = np.hstack(
(series, np.linspace(series[b], series[b + 1], ratio + 1)[1:-1])
)
# Sort again
timestamp, series = np.array(sorted(zip(timestamp, series))).transpose()
# Check everything is alright
diffs = np.diff(timestamp.astype(np.int64))
bad = np.where(diffs != np.median(diffs))[0]
assert len(bad) == 0, "Wrong processing"
return timestamp, series
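# An alternative sketch using pandas (not used below): put the series on a
# DatetimeIndex, re-index it to a fixed frequency and interpolate linearly.
# The weekly "W-SAT" frequency is an assumption for illustration; clean_freq
# above infers the spacing from the data itself, which is more general.
def clean_freq_pandas(dates, values, freq="W-SAT"):
    s = pd.Series(np.asarray(values, dtype=float), index=pd.to_datetime(dates))
    s = s.sort_index()
    return s.asfreq(freq).interpolate(method="linear")
# Example usage (commented out to keep the original pipeline unchanged):
# filled = clean_freq_pandas(subset["date"], subset["inventory_units"])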
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
# ### Example of inventory for one product
plt.plot(timestamp, series)
plt.show()
# Check every series is well behaved
for num in pd.unique(tr["product_number"]):
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
try:
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
except:
print(num)
# # Gated Recurrent Units
import torch
import torch.nn as nn
import torch.optim as optim
# Hyperparameters
input_size = 1
hidden_size = 100
output_size = 1
seq_length = 10
batch_size = 5
train_ratio = 0.5
num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
series = subset["inventory_units"].to_numpy()
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
data = np.vstack((timestamp, series)).transpose()
train_data = data[: int(len(data) * train_ratio)]
val_data = data[int(len(data) * train_ratio) :]
# ### Train / Validation data
plt.plot(train_data[:, 0], train_data[:, 1])
plt.plot(val_data[:, 0], val_data[:, 1], c="r")
plt.show()
def create_data_loader(data, seq_length, batch_size):
inputs = []
targets = []
for i in range(len(data) - seq_length):
inputs.append(data[i : i + seq_length, 1])
targets.append(data[i + seq_length, 1])
inputs = torch.tensor(np.array(inputs)).float().view(-1, seq_length, input_size)
targets = torch.tensor(np.array(targets)).float().view(-1, output_size)
dataset = torch.utils.data.TensorDataset(inputs, targets)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
return data_loader
train_loader = create_data_loader(train_data, seq_length, batch_size)
val_loader = create_data_loader(val_data, seq_length, batch_size)
# Define GRU model
class GRUModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(GRUModel, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.gru = nn.GRU(input_size, hidden_size)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden):
out, hidden = self.gru(x, hidden)
out = self.fc(out[:, -1, :])
return out, hidden
def init_hidden(self, seq_length):
return torch.zeros(self.input_size, seq_length, self.hidden_size)
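# Note on shapes: nn.GRU is created with the default batch_first=False, so it
# treats the first dimension of the (batch, seq_length, input_size) tensors
# built above as the sequence and the second as the batch. init_hidden's
# (1, seq_length, hidden_size) shape is therefore what the GRU expects as
# (num_layers, batch, hidden_size), which is why it is initialised with
# seq_length rather than the batch size.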
# Initialize model, criterion, and optimizer
model = GRUModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training loop
def train(model, train_loader, criterion, optimizer, device):
model.train()
hidden = model.init_hidden(seq_length).to(device)
total_loss = 0
for inputs, targets in train_loader:
inputs, targets = inputs.to(device), targets.to(device)
hidden = hidden.detach()
optimizer.zero_grad()
outputs, hidden = model(inputs, hidden)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
total_loss += loss.item()
    return total_loss / len(train_loader)  # average over the training batches, not the validation batches
# Validation loop
def validate(model, val_loader, criterion, device):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
total_loss = 0
with torch.no_grad():
for inputs, targets in val_loader:
inputs, targets = inputs.to(device), targets.to(device)
outputs, hidden = model(inputs, hidden)
loss = criterion(outputs, targets)
total_loss += loss.item()
return total_loss / len(val_loader)
# ### Training loop
# As you can see there is a huge difference between train and validation loss.
# Main training and validation loop
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 500
for epoch in range(num_epochs):
tr_loss = train(model, train_loader, criterion, optimizer, device)
val_loss = validate(model, val_loader, criterion, device)
if (epoch + 1) % 10 == 0:
print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {tr_loss:.4f}")
print(f"Epoch [{epoch+1}/{num_epochs}], Validation Loss: {val_loss:.4f}")
print("Training and validation completed.")
# ## Predictions
# There are two ways of predicting with a GRU. One is to feed in the real previous samples; the other is to run the model autoregressively, feeding its own predictions back as input. Only the autoregressive mode is available at inference time, but it is interesting to compare both.
def predict(model, loader, device):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
outputs = []
with torch.no_grad():
for inputs, targets in loader:
inputs, targets = inputs.to(device), targets.to(device)
output, hidden = model(inputs, hidden)
outputs.append(output.numpy()[0][0])
return np.array(outputs)
def predict_autoreg(model, device, steps, initial_inp):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
outputs = []
inputs = initial_inp
with torch.no_grad():
for k in range(steps):
output, hidden = model(inputs, hidden)
next = output.numpy()[0][0]
outputs.append(next)
aux = np.empty(inputs.shape)
aux[:, :-1, :] = inputs[:, 1:, :]
aux[:, -1, :] = next
inputs = torch.tensor(aux).float()
return np.array(outputs)
train_loader_pred = create_data_loader(train_data, seq_length, 1)
val_loader_pred = create_data_loader(val_data, seq_length, 1)
preds_tr = predict(model, train_loader_pred, device)
preds_val = predict(model, val_loader_pred, device)
preds_reg = predict_autoreg(
model, device, len(val_data), list(train_loader_pred)[-1][0]
)
preds_reg_tr = predict_autoreg(
model, device, len(train_data), list(train_loader_pred)[0][0]
)
plt.plot(train_data[:, 0], train_data[:, 1], label="Training")
plt.plot(train_data[seq_length:, 0], preds_tr, c="g", label="Predictions training")
plt.plot(
    train_data[:, 0], preds_reg_tr, c="k", label="Predictions training autoregressive"
)
plt.plot(val_data[:, 0], val_data[:, 1], c="r", label="Validation")
plt.plot(val_data[seq_length:, 0], preds_val, c="g", label="Predictions validation")
plt.plot(
val_data[:, 0], preds_reg, c="k", label="Predictions validation autoregressive"
)
plt.legend(loc="best")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389452.ipynb
| null | null |
[{"Id": 129389452, "ScriptId": 38472148, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2424396, "CreationDate": "05/13/2023 11:26:49", "VersionNumber": 1.0, "Title": "GRU example", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 243.0, "LinesInsertedFromPrevious": 243.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ## Load Preprocessed data
tr = pd.read_csv("/kaggle/input/dummy-hp/train_proc.csv")
ts = pd.read_csv("/kaggle/input/dummy-hp/test_proc.csv")
# ## Frequency subsampling
# For recurrent and autoregressive networks to work, the series needs a constant sampling frequency. That is not the case here: some timestamps are missing, so I simply fill those gaps by linear interpolation
num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
def clean_freq(timestamp, series):
# Detect wrong frequencies
diffs = np.diff(timestamp.astype(np.int64))
median = np.median(diffs)
bad = np.where(diffs != median)[0]
if len(bad) == 0:
return timestamp, series
# Add missing
for b in bad:
ratio = int(diffs[b] / median)
assert ratio * median == diffs[b], "Not integer frequency"
timestamp = np.hstack(
(timestamp, np.linspace(timestamp[b], timestamp[b + 1], ratio + 1)[1:-1])
)
series = np.hstack(
(series, np.linspace(series[b], series[b + 1], ratio + 1)[1:-1])
)
# Sort again
timestamp, series = np.array(sorted(zip(timestamp, series))).transpose()
# Check everything is alright
diffs = np.diff(timestamp.astype(np.int64))
bad = np.where(diffs != np.median(diffs))[0]
assert len(bad) == 0, "Wrong processing"
return timestamp, series
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
# ### Example of inventory for one product
plt.plot(timestamp, series)
plt.show()
# Check every series is well behaved
for num in pd.unique(tr["product_number"]):
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
try:
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
except:
print(num)
# # Gated Recurrent Units
import torch
import torch.nn as nn
import torch.optim as optim
# Hyperparameters
input_size = 1
hidden_size = 100
output_size = 1
seq_length = 10
batch_size = 5
train_ratio = 0.5
num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
series = subset["inventory_units"].to_numpy()
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
data = np.vstack((timestamp, series)).transpose()
train_data = data[: int(len(data) * train_ratio)]
val_data = data[int(len(data) * train_ratio) :]
# ### Train / Validation data
plt.plot(train_data[:, 0], train_data[:, 1])
plt.plot(val_data[:, 0], val_data[:, 1], c="r")
plt.show()
def create_data_loader(data, seq_length, batch_size):
inputs = []
targets = []
for i in range(len(data) - seq_length):
inputs.append(data[i : i + seq_length, 1])
targets.append(data[i + seq_length, 1])
inputs = torch.tensor(np.array(inputs)).float().view(-1, seq_length, input_size)
targets = torch.tensor(np.array(targets)).float().view(-1, output_size)
dataset = torch.utils.data.TensorDataset(inputs, targets)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
return data_loader
train_loader = create_data_loader(train_data, seq_length, batch_size)
val_loader = create_data_loader(val_data, seq_length, batch_size)
# Define GRU model
class GRUModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(GRUModel, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.gru = nn.GRU(input_size, hidden_size)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden):
out, hidden = self.gru(x, hidden)
out = self.fc(out[:, -1, :])
return out, hidden
def init_hidden(self, seq_length):
return torch.zeros(self.input_size, seq_length, self.hidden_size)
# Initialize model, criterion, and optimizer
model = GRUModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training loop
def train(model, train_loader, criterion, optimizer, device):
model.train()
hidden = model.init_hidden(seq_length).to(device)
total_loss = 0
for inputs, targets in train_loader:
inputs, targets = inputs.to(device), targets.to(device)
hidden = hidden.detach()
optimizer.zero_grad()
outputs, hidden = model(inputs, hidden)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
total_loss += loss.item()
    return total_loss / len(train_loader)  # average over the training batches, not the validation batches
# Validation loop
def validate(model, val_loader, criterion, device):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
total_loss = 0
with torch.no_grad():
for inputs, targets in val_loader:
inputs, targets = inputs.to(device), targets.to(device)
outputs, hidden = model(inputs, hidden)
loss = criterion(outputs, targets)
total_loss += loss.item()
return total_loss / len(val_loader)
# ### Training loop
# As you can see there is a huge difference between train and validation loss.
# Main training and validation loop
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 500
for epoch in range(num_epochs):
tr_loss = train(model, train_loader, criterion, optimizer, device)
val_loss = validate(model, val_loader, criterion, device)
if (epoch + 1) % 10 == 0:
print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {tr_loss:.4f}")
print(f"Epoch [{epoch+1}/{num_epochs}], Validation Loss: {val_loss:.4f}")
print("Training and validation completed.")
# ## Predictions
# There are two ways of predicting with a GRU. One is to feed in the real previous samples; the other is to run the model autoregressively, feeding its own predictions back as input. Only the autoregressive mode is available at inference time, but it is interesting to compare both.
def predict(model, loader, device):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
outputs = []
with torch.no_grad():
for inputs, targets in loader:
inputs, targets = inputs.to(device), targets.to(device)
output, hidden = model(inputs, hidden)
outputs.append(output.numpy()[0][0])
return np.array(outputs)
def predict_autoreg(model, device, steps, initial_inp):
model.eval()
hidden = model.init_hidden(seq_length).to(device)
outputs = []
inputs = initial_inp
with torch.no_grad():
for k in range(steps):
output, hidden = model(inputs, hidden)
next = output.numpy()[0][0]
outputs.append(next)
aux = np.empty(inputs.shape)
aux[:, :-1, :] = inputs[:, 1:, :]
aux[:, -1, :] = next
inputs = torch.tensor(aux).float()
return np.array(outputs)
train_loader_pred = create_data_loader(train_data, seq_length, 1)
val_loader_pred = create_data_loader(val_data, seq_length, 1)
preds_tr = predict(model, train_loader_pred, device)
preds_val = predict(model, val_loader_pred, device)
preds_reg = predict_autoreg(
model, device, len(val_data), list(train_loader_pred)[-1][0]
)
preds_reg_tr = predict_autoreg(
model, device, len(train_data), list(train_loader_pred)[0][0]
)
plt.plot(train_data[:, 0], train_data[:, 1], label="Training")
plt.plot(train_data[seq_length:, 0], preds_tr, c="g", label="Predictions training")
plt.plot(
    train_data[:, 0], preds_reg_tr, c="k", label="Predictions training autoregressive"
)
plt.plot(val_data[:, 0], val_data[:, 1], c="r", label="Validation")
plt.plot(val_data[seq_length:, 0], preds_val, c="g", label="Predictions validation")
plt.plot(
val_data[:, 0], preds_reg, c="k", label="Predictions validation autoregressive"
)
plt.legend(loc="best")
plt.show()
| false | 0 | 2,458 | 0 | 2,458 | 2,458 |
||
129389501
|
# # Regression Model for car data
# # 1. About Dataset
# Variable Name
# Description
# Car_Name
# Name of the cars
# Year
# Year of the car when it was bought
# Selling_Price
# Price at which the car is being sold
# Present_Price
# present price of cars
# Kms_Driven
# Number of the kilometres the car is driven
# Fuel_Type
# Fuel type of car
# Seller_Type
# Tells if seller is individual or a dealer
# Transmission
# Gear transmission of the car
# Owner
# Number of previous owner of the car
# # 2. Dataset and data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from warnings import filterwarnings
filterwarnings("ignore")
data = pd.read_csv("/kaggle/input/cardekho/car data (1).csv")
df = pd.DataFrame(data)
df
# After reading the data we find 8 features; I drop one of them (Car_Name), and Selling_Price is the target.
# Next we check the data for missing values and noise, which we can do with describe() and isna().
df.describe()
# Check for missing values
nans = df[df.isna().any(axis=1)]
print(f"Total rows with NaNs: {nans.shape[0]}\n")
df.info()
# remove car_name column from dataset
df1 = df.drop("Car_Name", axis="columns")
# After removing the Car_Name column, insert an Age column derived from the Year column.
# Define Age from the Year column and insert it
Age = abs(df1.Year - 2019)
df1.insert(1, "Age", Age)
df1.nunique()
df1
print(df["Fuel_Type"].unique())
print(df["Seller_Type"].unique())
print(df["Transmission"].unique())
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Petrol", 2)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Diesel", 3)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("CNG", 4)
df1["Seller_Type"] = df1["Seller_Type"].replace("Dealer", 2)
df1["Seller_Type"] = df1["Seller_Type"].replace("Individual", 3)
df1["Transmission"] = df1["Transmission"].replace("Manual", 2)
df1["Transmission"] = df1["Transmission"].replace("Automatic", 3)
# Looking at the data info, three columns have object dtype, so we encode them as numbers.
# The encoding is as follows:
# Fuel_Type    ===>> Petrol == 2, Diesel == 3, CNG == 4
# Seller_Type  ===>> Dealer == 2, Individual == 3
# Transmission ===>> Manual == 2, Automatic == 3
df2 = df1.drop(columns=["Year"], axis="columns")
df2
# Checking the corelation of all the inputs
pear_corr = df2.corr(method="pearson")
pear_corr.style.background_gradient(cmap="Greens")
# As we can see, Selling_Price and Present_Price are strongly correlated; Kms_Driven and Age are also correlated with each other.
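# To quantify this, sort the correlations of every feature with the target
# (reusing the Pearson correlation matrix computed above).
print(pear_corr["Selling_Price"].sort_values(ascending=False))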
# To check for noise (outliers), I draw a scatter plot of each feature against Selling_Price
for column in df2.drop(columns=["Selling_Price"]).columns:
plt.figure(figsize=(10, 5))
plt.scatter(df2[column], df2.Selling_Price, alpha=0.5)
plt.title(
column + " & Selling_Price", backgroundcolor="green", color="white", fontsize=15
)
plt.xlabel(column, fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Fuel_Type"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Fuel_Type", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(4, 4),
xytext=(3.75, 10),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Owner"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Owner", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(3, 3),
xytext=(2.75, 6),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Present_Price"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Present_Price", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(92, 35),
xytext=(90, 28),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Kms_Driven"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Kms_Driven", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(500000, 0),
xytext=(500000, 5),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
# univariate analysis of categorical data:
import seaborn as sns
classification = ["Fuel_Type", "Seller_Type", "Transmission", "Owner"]
sns.set_palette("summer_r")
for i, col in enumerate(classification):
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
# count of col (countplot)
sns.countplot(data=df, x=col, ax=axes[0])
for container in axes[0].containers:
axes[0].bar_label(container)
# count of col (pie chart)
slices = df[col].value_counts().sort_index().values
activities = [var for var in df[col].value_counts().sort_index().index]
axes[1].pie(slices, labels=activities, shadow=True, autopct="%1.1f%%")
plt.suptitle(col, backgroundcolor="black", color="white", fontsize=15)
plt.show()
numerical = ["Age", "Selling_Price", "Present_Price", "Kms_Driven"]
i = 0
sns.set_palette("summer_r")
while i < 4:
fig = plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
sns.boxplot(x=numerical[i], data=df1)
plt.title(numerical[i])
plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15)
i += 1
plt.subplot(1, 2, 2)
sns.boxplot(x=numerical[i], data=df1)
plt.title(numerical[i])
plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15)
i += 1
plt.show()
sns.set_style("darkgrid")
fig, ax = plt.subplots(2, 2, figsize=(16, 12))
i = 0
j = 0
for idx, col in enumerate(numerical):
i = idx // 2
j = idx % 2
sns.histplot(x=col, data=df2, stat="frequency", ax=ax[i, j])
sns.kdeplot(x=col, data=df2, color="purple", ax=ax[i, j], linewidth=3)
ax[i, j].axvline(
x=df2[col].mean(), color="r", ls="--", label="Mean value", linewidth=2
)
ax[i, j].legend()
ax[i, j].set_xlabel(col, fontsize=12)
plt.suptitle("Continuous Feature Frequency", size=25, y=1.02, fontweight="bold")
plt.tight_layout()
plt.show()
# Remove the few rows flagged above as noise (outliers)
df3 = df2[df2["Present_Price"] < 80.00]
df3 = df3[df3["Kms_Driven"] < 300000.000000]
df3 = df3[df3["Fuel_Type"] < 4]
df3 = df3[df3["Owner"] < 3]
df3.describe()
#
# # 3. model
# # Regression Model
from sklearn import preprocessing
# normalizing
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
print(regressor.intercept_)
print(regressor.coef_)
# Metrics on the 25% held-out test data
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
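# A single train/test split gives a fairly noisy estimate on roughly 300 rows.
# As a sketch (not part of the original evaluation), 5-fold cross-validation on
# the same features gives a more stable picture of the R2 score.
from sklearn.model_selection import cross_val_score
cv_r2 = cross_val_score(LinearRegression(), x, y.ravel(), cv=5, scoring="r2")
print("Cross-validated R2: %.3f +/- %.3f" % (cv_r2.mean(), cv_r2.std()))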
compare = pd.DataFrame({"actual": y_test.flatten(), "prediction": y_pred.flatten()})
compare
# plot predicted vs actual
sns.set_palette("dark:#5A9_r")
plt.figure(figsize=(10, 5))
# sns.regplot(compare['actual'],compare['prediction'] , line_kws={'color':'r', 'alpha':0.8,
# 'linestyle':'--', 'linewidth':2},
# scatter_kws={'alpha':0.5})
sns.jointplot(data=compare, x="actual", y="prediction", kind="reg")
plt.ylim(0, 25)
plt.xlabel("Actual Values")
plt.ylabel("prediction Values")
plt.show()
import scipy.stats as stats
print("Pearson R: ", stats.pearsonr(compare["prediction"], compare["actual"]))
a = x_train.Present_Price
b = y_train
c = x_test.Present_Price
d = y_test
plt.scatter(a, b)
plt.scatter(c, d)
x_test.insert(3, "y_test", y_test)
x_test.insert(4, "y_pred", y_pred)
A = x_test.sort_values(by=["Age"])
plt.scatter(A.Age, A.y_test)
plt.plot(A.Age, A.y_pred, color="red")
# Insert squared (**2) columns for some of the features
present2 = df3.Present_Price**2
owner2 = df3.Owner**2
Kms2 = df3.Kms_Driven**2
Fuel2 = df3.Fuel_Type**2
df3.insert(1, "present2", present2)
df3.insert(1, "Kms2", Kms2)
df3.insert(1, "Fuel2", Fuel2)
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
# Metrics after adding the squared features
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
# insert column km*age
AK = df3.Kms_Driven * df3.Age
df3.insert(1, "AK", AK)
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
print(regressor.intercept_)
print(regressor.coef_)
# Metrics after adding the squared features and the Kms_Driven * Age interaction
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
# Now append a new sample, re-normalize the features, and predict its selling price
df_new = pd.DataFrame(
{
"AK": [420000],
"Fuel2": [4],
"Kms2": [42000 * 42000],
"present2": [126.1129],
"Selling_Price": [1],
"Present_Price": [11.23],
"Kms_Driven": [42000],
"Age": [10],
"Owner": [1],
"Fuel_Type": [2],
"Seller_Type": [2],
"Transmission": [2],
}
)
df_new1 = pd.concat([df3, df_new], ignore_index=True)  # DataFrame.append is deprecated in newer pandas
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df5 = df_new1.drop(columns=["Selling_Price"])
norm1 = scaler.fit_transform(df5)
norm_df1 = pd.DataFrame(
norm1,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x_train = norm_df1[
[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
]
][:296]
y_train = df_new1["Selling_Price"][:296].values.reshape(-1, 1)
regressor.fit(x_train, y_train)
x_test = norm_df1[
[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
]
][296:]
y_pred = regressor.predict(x_test)
y_pred
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389501.ipynb
| null | null |
[{"Id": 129389501, "ScriptId": 38469068, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12941081, "CreationDate": "05/13/2023 11:27:27", "VersionNumber": 1.0, "Title": "Regression model for car data", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 379.0, "LinesInsertedFromPrevious": 379.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 44}]
| null | null | null | null |
# # Regression Model for car data
# # 1. About Dataset
# Variable Name
# Description
# Car_Name
# Name of the cars
# Year
# Year of the car when it was bought
# Selling_Price
# Price at which the car is being sold
# Present_Price
# present price of cars
# Kms_Driven
# Number of the kilometres the car is driven
# Fuel_Type
# Fuel type of car
# Seller_Type
# Tells if seller is individual or a dealer
# Transmission
# Gear transmission of the car
# Owner
# Number of previous owner of the car
# # 2. Dataset and data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from warnings import filterwarnings
filterwarnings("ignore")
data = pd.read_csv("/kaggle/input/cardekho/car data (1).csv")
df = pd.DataFrame(data)
df
# After reading the data we find 8 features; I drop one of them (Car_Name), and Selling_Price is the target.
# Next we check the data for missing values and noise, which we can do with describe() and isna().
df.describe()
# Check for missing values
nans = df[df.isna().any(axis=1)]
print(f"Total rows with NaNs: {nans.shape[0]}\n")
df.info()
# remove car_name column from dataset
df1 = df.drop("Car_Name", axis="columns")
# After removing the Car_Name column, insert an Age column derived from the Year column.
# Define Age from the Year column and insert it
Age = abs(df1.Year - 2019)
df1.insert(1, "Age", Age)
df1.nunique()
df1
print(df["Fuel_Type"].unique())
print(df["Seller_Type"].unique())
print(df["Transmission"].unique())
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Petrol", 2)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Diesel", 3)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("CNG", 4)
df1["Seller_Type"] = df1["Seller_Type"].replace("Dealer", 2)
df1["Seller_Type"] = df1["Seller_Type"].replace("Individual", 3)
df1["Transmission"] = df1["Transmission"].replace("Manual", 2)
df1["Transmission"] = df1["Transmission"].replace("Automatic", 3)
# Looking at the data info, three columns have object dtype, so we encode them as numbers.
# The encoding is as follows:
# Fuel_Type    ===>> Petrol == 2, Diesel == 3, CNG == 4
# Seller_Type  ===>> Dealer == 2, Individual == 3
# Transmission ===>> Manual == 2, Automatic == 3
df2 = df1.drop(columns=["Year"], axis="columns")
df2
# Checking the corelation of all the inputs
pear_corr = df2.corr(method="pearson")
pear_corr.style.background_gradient(cmap="Greens")
# As we can see, Selling_Price and Present_Price are strongly correlated; Kms_Driven and Age are also correlated with each other.
# To check for noise (outliers), I draw a scatter plot of each feature against Selling_Price
for column in df2.drop(columns=["Selling_Price"]).columns:
plt.figure(figsize=(10, 5))
plt.scatter(df2[column], df2.Selling_Price, alpha=0.5)
plt.title(
column + " & Selling_Price", backgroundcolor="green", color="white", fontsize=15
)
plt.xlabel(column, fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Fuel_Type"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Fuel_Type", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(4, 4),
xytext=(3.75, 10),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Owner"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Owner", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(3, 3),
xytext=(2.75, 6),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Present_Price"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Present_Price", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(92, 35),
xytext=(90, 28),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
plt.figure(figsize=(10, 5))
plt.scatter(df2["Kms_Driven"], df2.Selling_Price, alpha=0.5)
plt.xlabel("Kms_Driven", fontsize=20)
plt.ylabel("Selling_Price", fontsize=20)
plt.annotate(
"maybe noise",
xy=(500000, 0),
xytext=(500000, 5),
arrowprops=dict(facecolor="red", shrink=0.05),
fontsize=20,
)
plt.grid()
plt.show()
# univariate analysis of categorical data:
import seaborn as sns
classification = ["Fuel_Type", "Seller_Type", "Transmission", "Owner"]
sns.set_palette("summer_r")
for i, col in enumerate(classification):
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
# count of col (countplot)
sns.countplot(data=df, x=col, ax=axes[0])
for container in axes[0].containers:
axes[0].bar_label(container)
# count of col (pie chart)
slices = df[col].value_counts().sort_index().values
activities = [var for var in df[col].value_counts().sort_index().index]
axes[1].pie(slices, labels=activities, shadow=True, autopct="%1.1f%%")
plt.suptitle(col, backgroundcolor="black", color="white", fontsize=15)
plt.show()
numerical = ["Age", "Selling_Price", "Present_Price", "Kms_Driven"]
i = 0
sns.set_palette("summer_r")
while i < 4:
fig = plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
sns.boxplot(x=numerical[i], data=df1)
plt.title(numerical[i])
plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15)
i += 1
plt.subplot(1, 2, 2)
sns.boxplot(x=numerical[i], data=df1)
plt.title(numerical[i])
plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15)
i += 1
plt.show()
sns.set_style("darkgrid")
fig, ax = plt.subplots(2, 2, figsize=(16, 12))
i = 0
j = 0
for idx, col in enumerate(numerical):
i = idx // 2
j = idx % 2
sns.histplot(x=col, data=df2, stat="frequency", ax=ax[i, j])
sns.kdeplot(x=col, data=df2, color="purple", ax=ax[i, j], linewidth=3)
ax[i, j].axvline(
x=df2[col].mean(), color="r", ls="--", label="Mean value", linewidth=2
)
ax[i, j].legend()
ax[i, j].set_xlabel(col, fontsize=12)
plt.suptitle("Continuous Feature Frequency", size=25, y=1.02, fontweight="bold")
plt.tight_layout()
plt.show()
# Remove the few rows flagged above as noise (outliers)
df3 = df2[df2["Present_Price"] < 80.00]
df3 = df3[df3["Kms_Driven"] < 300000.000000]
df3 = df3[df3["Fuel_Type"] < 4]
df3 = df3[df3["Owner"] < 3]
df3.describe()
#
# # 3. model
# # Regression Model
from sklearn import preprocessing
# normalizing
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
print(regressor.intercept_)
print(regressor.coef_)
# Metrics on the 25% held-out test data
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
compare = pd.DataFrame({"actual": y_test.flatten(), "prediction": y_pred.flatten()})
compare
# plot predicted vs actual
sns.set_palette("dark:#5A9_r")
plt.figure(figsize=(10, 5))
# sns.regplot(compare['actual'],compare['prediction'] , line_kws={'color':'r', 'alpha':0.8,
# 'linestyle':'--', 'linewidth':2},
# scatter_kws={'alpha':0.5})
sns.jointplot(data=compare, x="actual", y="prediction", kind="reg")
plt.ylim(0, 25)
plt.xlabel("Actual Values")
plt.ylabel("prediction Values")
plt.show()
import scipy.stats as stats
print("Pearson R: ", stats.pearsonr(compare["prediction"], compare["actual"]))
a = x_train.Present_Price
b = y_train
c = x_test.Present_Price
d = y_test
plt.scatter(a, b)
plt.scatter(c, d)
x_test.insert(3, "y_test", y_test)
x_test.insert(4, "y_pred", y_pred)
A = x_test.sort_values(by=["Age"])
plt.scatter(A.Age, A.y_test)
plt.plot(A.Age, A.y_pred, color="red")
# Insert squared (**2) columns for some of the features
present2 = df3.Present_Price**2
owner2 = df3.Owner**2
Kms2 = df3.Kms_Driven**2
Fuel2 = df3.Fuel_Type**2
df3.insert(1, "present2", present2)
df3.insert(1, "Kms2", Kms2)
df3.insert(1, "Fuel2", Fuel2)
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
# Metrics after adding the squared features
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
# insert column km*age
AK = df3.Kms_Driven * df3.Age
df3.insert(1, "AK", AK)
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df4 = df3.drop(columns=["Selling_Price"])
norm = scaler.fit_transform(df4)
norm_df = pd.DataFrame(
norm,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x = pd.DataFrame(
norm_df,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
y = df3["Selling_Price"].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=101
)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)
print(regressor.intercept_)
print(regressor.coef_)
# Metrics after adding the squared features and the Kms_Driven * Age interaction
print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("R2 Score:", metrics.r2_score(y_test, y_pred))
regressor.score(x, y)
# Now append a new sample, re-normalize the features, and predict its selling price
df_new = pd.DataFrame(
{
"AK": [420000],
"Fuel2": [4],
"Kms2": [42000 * 42000],
"present2": [126.1129],
"Selling_Price": [1],
"Present_Price": [11.23],
"Kms_Driven": [42000],
"Age": [10],
"Owner": [1],
"Fuel_Type": [2],
"Seller_Type": [2],
"Transmission": [2],
}
)
df_new1 = pd.concat([df3, df_new], ignore_index=True)  # DataFrame.append is deprecated in newer pandas
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
df5 = df_new1.drop(columns=["Selling_Price"])
norm1 = scaler.fit_transform(df5)
norm_df1 = pd.DataFrame(
norm1,
columns=[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
],
)
x_train = norm_df1[
[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
]
][:296]
y_train = df_new1["Selling_Price"][:296].values.reshape(-1, 1)
regressor.fit(x_train, y_train)
x_test = norm_df1[
[
"AK",
"Fuel2",
"Kms2",
"present2",
"Present_Price",
"Kms_Driven",
"Age",
"Owner",
"Fuel_Type",
"Seller_Type",
"Transmission",
]
][296:]
y_pred = regressor.predict(x_test)
y_pred
| false | 0 | 4,630 | 44 | 4,630 | 4,630 |
||
129389272
|
# # Sales forecasting
# The goal of this project is to predict future sales using time-series forecasting techniques.
# # Import Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from scipy.stats import pearsonr
import itertools
from sklearn import preprocessing
from statsmodels.tsa.stattools import kpss
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# # Datasets
train_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv")
test_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv")
oil_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv")
transaction_df = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/transactions.csv"
)
stores_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/stores.csv")
holiday_event_df = pd.read_csv(
"/kaggle/input/store-sales-time-series-forecasting/holidays_events.csv"
)
# # Understanding the Data
train_df.head()
test_df.head()
oil_df.head()
transaction_df.head()
stores_df.head()
holiday_event_df.head()
# The sales column is the target variable.
# ## Merging datasets holiday_event_df, stores_df, oil_df & train_df
train_df = train_df.merge(stores_df, on="store_nbr")
train_df = train_df.merge(oil_df, on="date", how="left")
holiday_event_df = holiday_event_df.rename(columns={"type": "holiday_type"})
train_df = train_df.merge(holiday_event_df, on="date", how="left")
train_df.head(3)
train_df.info()
# # Missing Values Detection
train_df.isnull().sum()
# percentage of missing values in train_df
missing_percentages = train_df.isnull().sum() / len(train_df) * 100
print(missing_percentages)
# remove columns that are having more than 30% missing values
columns_to_delete = missing_percentages[missing_percentages > 30].index
train_df = train_df.drop(columns=columns_to_delete)
train_df.info()
# # Duplicates
#
train_df.duplicated().any()
dupes = train_df.duplicated()
# dupes
sum(dupes)
# dropping duplicate values
train_df = train_df.drop_duplicates()
train_df
train_df.duplicated().any()
test_df.duplicated().any()
# # Check if there still any missing values present in the train_df
#
train_df.isnull().sum()
# # Calculate count, mean, std, min, 25%, 50%, 75%, max values for each column. Prepare an analysis of the difference between mean and median for each column and possible reasons for the same.
train_df.describe()
# # EDA
train_df.info()
# ## Questions
# 1. Does the type of store affect store sales?
# 2. Which family has the highest sales?
# 3. Are promotions able to improve sales?
# 4. Which city has the most customers?
# 5. Which state has the most customers?
# 6. Which store has the highest sales?
# 7. Which month has the most sales, and which has the least?
# ### 1. Does the type of stores affect the store sales?
# To answer the first question, 'Does the type of store affect store sales?', I will use a one-way ANOVA test.
# ANOVA (Analysis of Variance) is a statistical test used to determine whether there are significant differences between the means of two or more groups. It compares the variation between the groups (due to the different categories or factors) to the variation within the groups.
# H0: the type of store does not affect store sales, i.e. there is no significant difference in sales between store types (we fail to reject H0 if the p-value > 0.05).
# H1: the type of store does affect store sales, i.e. there is a significant difference in sales between store types (we reject H0 if the p-value < 0.05).
#
grouped_data = train_df.groupby("type")["sales"]
# Perform the ANOVA test
f_statistic, p_value = stats.f_oneway(
*[grouped_data.get_group(type) for type in grouped_data.groups]
)
# Print the results
print("F-Statistic:", f_statistic)
print("p-value:", p_value)
# Based on the F-statistic and p-value above, we reject the null hypothesis and accept the alternative hypothesis. Hence, the type of store does affect store sales: there is a significant difference in sales between the different store types.
# Sales Vs Type
plt.scatter(train_df["type"], train_df["sales"])
plt.ylabel("sales")
plt.xlabel("type")
plt.show()
# ### 2. Which family is having the highest sales?
# Pie chart
# Group the data by family and calculate the total sales for each family
family_sales = train_df.groupby("family")["sales"].sum()
# Sort the families based on sales in descending order
family_sales_sorted = family_sales.sort_values(ascending=False)
# Get the top 5 families with the highest sales
top_families = family_sales_sorted.head(5)
# Create the pie chart
plt.pie(top_families, labels=top_families.index, autopct="%1.1f%%", startangle=90)
plt.title("Distribution of Sales by Family")
plt.axis("equal")
plt.show()
# Based on the pie chart above, GROCERY I has the highest sales, with BEVERAGES in second place.
# ### 3. Does promotion able to improve the sales?
# To answer the third question, 'Are promotions able to improve sales?', I will use a Pearson correlation test to determine the relationship between the two variables, as both are numerical. The Pearson correlation coefficient measures the linear relationship between two continuous variables and ranges from -1 to +1.
# H0: promotions do not affect store sales (we fail to reject H0 if the p-value > 0.05).
# H1: promotions do affect store sales (we reject H0 if the p-value < 0.05).
#
correlation, p_value = pearsonr(train_df["onpromotion"], train_df["sales"])
print("Pearson correlation coefficient:", correlation)
print("p-value:", p_value)
# Based on the Pearson correlation coefficient of 0.4279 and the p-value of 0.0, we can reject the null hypothesis (H0) and conclude that there is a significant relationship between promotion and store sales. Therefore, the promotion does affect store sales.
# Scatter plot
plt.scatter(train_df["onpromotion"], train_df["sales"])
plt.xlabel("Promotion")
plt.ylabel("Sales")
plt.title("Promotion vs Sales")
plt.show()
# ### 4. Which city has the most customers?
# Count Plot
# Create a count plot
plt.figure(figsize=(10, 6)) # Set the figure size
sns.countplot(data=train_df, x="city")
plt.xlabel("City")
plt.ylabel("Count")
plt.title("Sales Distribution by City")
plt.xticks(rotation=45)
plt.show()
# Based on the count plot above, Quito has by far the most sales records.
# ### 5. Which state is having the most number of customers?
#
# Count Plot
# Create a count plot
plt.figure(figsize=(10, 6)) # Set the figure size
sns.countplot(data=train_df, x="state")
plt.xlabel("state")
plt.ylabel("Count")
plt.title("Sales Distribution by City")
plt.xticks(rotation=45)
plt.show()
# Based on the count plot above, the state of Pichincha has the most sales records compared to the other states.
# ### 6. Which of the stores has the highest sales.
# Calculate the total sales for each store
store_sales = train_df.groupby("store_nbr")["sales"].sum().reset_index()
# Sort the stores based on sales in descending order
store_sales = store_sales.sort_values("sales", ascending=False)
# Create a bar plot
plt.figure(figsize=(12, 6))
sns.barplot(data=store_sales, x="store_nbr", y="sales")
plt.xlabel("Store Number")
plt.ylabel("Total Sales")
plt.title("Total Sales by Store")
plt.xticks(rotation=45)
plt.show()
# ### 7. Which month is having the most sales, and least sales.
# First convert the 'date' from object to date time
train_df["date"] = pd.to_datetime(train_df["date"])
# create new columns 'month' 'year'
train_df["month"] = train_df["date"].dt.month
train_df["year"] = train_df["date"].dt.year
train_df.head(7)
# Group the data by month, year, and calculate the total sales
monthly_sales = train_df.groupby(["month", "year"])["sales"].sum().reset_index()
# Create the line chart
plt.figure(figsize=(10, 6)) # Set the figure size
# Get unique years and cycle through colors
years = monthly_sales["year"].unique()
colors = itertools.cycle(["red", "green", "blue", "orange", "purple"])
for year in years:
year_data = monthly_sales[monthly_sales["year"] == year]
plt.plot(
year_data["month"],
year_data["sales"],
marker="o",
color=next(colors),
label=str(year),
)
plt.xlabel("Month")
plt.ylabel("Sales")
plt.title("Monthly Sales Trend")
# Customize x-axis ticks to show month names
month_names = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
plt.xticks(range(1, 13), month_names)
plt.legend()
plt.show()
# Overall, the 2016 line (orange) shows consistently high sales from January through December, with December the strongest month of that year. On the other hand, 2013 had the lowest overall sales of all the years, especially in February.
train_df = train_df.groupby("date")["sales", "onpromotion"].sum().reset_index()
print(train_df)
# # Autocorrelation
# Autocorrelation measures the correlation between a time series and its lagged values. Autocorrelation plots (ACF) and partial autocorrelation plots (PACF) help identify significant lag values and potential autoregressive or moving average components.
# - If the autocorrelation value is close to 1 or -1, it indicates a strong positive or negative autocorrelation, respectively.
# - If the autocorrelation value is close to 0, it indicates a weak or no autocorrelation.
sales_series = train_df["sales"]
autocorr_values = sales_series.autocorr()
print("Autocorrelation:", autocorr_values)
# Based on the result above, the lag-1 autocorrelation of 0.766 is fairly high, indicating a clear positive autocorrelation: the current sales values are strongly related to the previous sales values.
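# The value above only looks one step back; a quick sketch of the first few
# lags (the ACF plot below shows the same information graphically):
for lag in range(1, 8):
    print(f"Lag {lag} autocorrelation: {sales_series.autocorr(lag=lag):.3f}")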
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(train_df["sales"])
from statsmodels.graphics.tsaplots import plot_pacf
import matplotlib.pyplot as plt
# Plot the PACF
fig, ax = plt.subplots(figsize=(10, 6))
plot_pacf(train_df["sales"], ax=ax)
plt.xlabel("Lag")
plt.ylabel("Partial Autocorrelation")
plt.title("Partial Autocorrelation Function (PACF)")
plt.show()
# # Stationarity Test
# There are various statistical tests to check stationarity, including the Augmented Dickey-Fuller (ADF) test and the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test.
# ### Augmented Dickey-Fuller (ADF) test
# The Augmented Dickey-Fuller (ADF) test is a statistical test used to determine whether a time series is stationary or non-stationary. Stationarity is an important assumption in many time series analysis models.
# The ADF test evaluates the null hypothesis that the time series has a unit root, indicating non-stationarity. The alternative hypothesis is that the time series is stationary.
# When performing the ADF test, we obtain the ADF statistic and the p-value. The ADF statistic is a negative number and the more negative it is, the stronger the evidence against the null hypothesis. The p-value represents the probability of observing the ADF statistic or a more extreme value if the null hypothesis were true. A low p-value (below a chosen significance level, typically 0.05) indicates strong evidence against the null hypothesis and suggests that the time series is stationary.
ts = train_df["sales"]
import pandas as pd
from statsmodels.tsa.stattools import adfuller
# Perform the ADF test
result = adfuller(ts)
# Extract and print the test statistics and p-value
adf_statistic = result[0]
p_value = result[1]
print("ADF Statistic:", adf_statistic)
print("p-value:", p_value)
# The ADF statistic of -2.616195748604853 suggests that there is some evidence against the null hypothesis of non-stationarity in the time series. However, the p-value of 0.08969592175787544 indicates that this evidence is not statistically significant at a conventional significance level of 0.05.
# In simpler terms, the ADF test indicates that there may be some stationarity in the time series data, but it is not strong enough to conclude with certainty. The p-value suggests that the observed results could occur by chance under the assumption of non-stationarity. Therefore, further analysis and modeling techniques may be necessary to better understand the stationarity of the data.
# ### Kwiatkowski-Phillips-Schmidt-Shin (KPSS)
# The Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test is another statistical test used to assess the stationarity of a time series. It is complementary to the Augmented Dickey-Fuller (ADF) test.
# The KPSS test evaluates the null hypothesis that the time series is stationary against the alternative hypothesis of non-stationarity. Unlike the ADF test, which assumes the presence of a unit root, the KPSS test assumes the absence of a unit root.
# The test calculates the KPSS statistic, which measures the cumulative sum of squared deviations from the mean in the series. It also provides a p-value that indicates the probability of observing the KPSS statistic or a more extreme value under the null hypothesis.
# Interpreting the results of the KPSS test involves considering the KPSS statistic and the associated p-value. If the KPSS statistic is greater than the critical value at a chosen significance level (e.g., 0.05), it provides evidence against the null hypothesis of stationarity. Conversely, if the KPSS statistic is smaller than the critical value, it suggests that the time series is stationary.
result = kpss(ts)
# Extract and print the test statistic and p-value
kpss_statistic = result[0]
p_value = result[1]
print("KPSS Statistic:", kpss_statistic)
print("p-value:", p_value)
# The KPSS test result suggests that the time series is likely non-stationary. The KPSS statistic value of 5.737661236232327 exceeds the critical value, and the p-value of 0.01 is smaller than the significance level of 0.05. Therefore, we reject the null hypothesis of stationarity, indicating the presence of a trend or non-constant variance in the time series.
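# Hedged illustration (my addition, not part of the original analysis): since the ADF and KPSS results together point towards non-stationarity, a common remedy is first-order differencing, which is exactly what d=1 does inside the ARIMA model fitted in the next section. `ts_diff` below is an illustrative name.
ts_diff = ts.diff().dropna()
adf_diff = adfuller(ts_diff)
kpss_diff = kpss(ts_diff)
print("ADF on differenced series:", adf_diff[0], "p-value:", adf_diff[1])
print("KPSS on differenced series:", kpss_diff[0], "p-value:", kpss_diff[1])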
# # Autoregressive Integrated Moving Average Model (ARIMA) model
p = 2
d = 1
q = 1
train_np = train_df["sales"].values.astype("float64")
model = sm.tsa.ARIMA(train_np, order=(p, d, q))
result = model.fit()
# Print the model summary
print(result.summary())
# Make predictions
start_idx = len(train_np)
end_idx = len(train_np) + len(test_df) - 1
predictions = result.predict(start=start_idx, end=end_idx)
# Print the predictions
print(predictions)
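# Hedged visualization (my addition, not in the original notebook): a quick sanity check of the forecast.
# Note that the training series holds one aggregated value per date, while test_df has one row per
# store/family/date, so end_idx above spans far more steps than the number of future days in the test
# window; only the first 30 forecast steps are plotted here.
plt.figure(figsize=(10, 4))
plt.plot(range(len(train_np) - 60, len(train_np)), train_np[-60:], label="train (last 60 days)")
plt.plot(range(start_idx, start_idx + 30), predictions[:30], label="ARIMA forecast (first 30 steps)")
plt.legend()
plt.title("ARIMA forecast vs. recent daily sales")
plt.show()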
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_absolute_error, mean_squared_error
y_train = train_df["sales"]
X_train = train_df["onpromotion"]
# Define and fit the SARIMAX model
model = SARIMAX(y_train, exog=X_train, order=(1, 0, 1), seasonal_order=(1, 0, 1, 7))
model_fit = model.fit()
# Make predictions on the training data
y_pred = model_fit.predict(
start=train_df.index[0], end=train_df.index[-1], exog=X_train
)
# Compute mean absolute error and mean squared error
mae = mean_absolute_error(y_train, y_pred)
mse = mean_squared_error(y_train, y_pred)
print("MAE:", mae)
print("MSE:", mse)
# # Submission
#
submission = pd.DataFrame()
submission["id"] = test_df.index
submission["sales"] = np.zeros(len(test_df))
# save the submission file as a CSV file
submission.to_csv("mysubmission.csv", index=False)
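# Hedged sketch (my addition, not the author's submission logic): instead of submitting zeros, one could
# forecast the daily total sales with the fitted SARIMAX and naively spread each day's total over that
# day's store/family rows. It assumes test_df has "date" and "onpromotion" columns covering the days
# right after the training period; `submission_sarimax` is an illustrative name.
future_exog = test_df.groupby("date")["onpromotion"].sum().sort_index()
daily_forecast = model_fit.forecast(
    steps=len(future_exog), exog=future_exog.values.reshape(-1, 1)
)
daily_forecast = pd.Series(np.asarray(daily_forecast), index=future_exog.index)
rows_per_day = test_df.groupby("date")["date"].transform("count")
submission_sarimax = pd.DataFrame(
    {
        "id": test_df.index,
        "sales": daily_forecast.loc[test_df["date"]].values / rows_per_day.values,
    }
)
submission_sarimax.to_csv("mysubmission_sarimax.csv", index=False)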
||
129310768
|
<jupyter_start><jupyter_text>Fashion Product Images Dataset
### Context
The growing e-commerce industry presents us with a large dataset waiting to be scraped and researched upon. In addition to professionally shot high-resolution product images, we also have multiple label attributes describing the product which were manually entered while cataloging. To add to this, we also have descriptive text that comments on the product characteristics.
### Content
Each product is identified by an ID like 42431. You will find a map to all the products in `styles.csv`. From here, you can fetch the image for this product from `images/42431.jpg` and the complete metadata from `styles/42431.json`.
To get started easily, we also have exposed some of the key product categories and its display name in `styles.csv`.
If this dataset is too large, you can start with a smaller (280MB) version here:
https://www.kaggle.com/paramaggarwal/fashion-product-images-small
### Inspiration
So what can you try building? Here are some suggestions:
* Start with an image classifier. Use the `masterCategory` column from `styles.csv` and train a convolutional neural network.
* The same can be achieved via NLP. Extract the product descriptions from `styles/42431.json` and then run a classifier to get the `masterCategory`.
* Try adding more sophisticated classification by predicting the other category labels in `styles.csv`
Transfer Learning is your friend and use it wisely. You can even take things much further from here:
* Is it possible to build a GAN that takes a category as input and outputs an image?
* Auto-encode the image attributes to be able to make a visual search engine that converts the image into a small encoding which is sent to the server to perform visual search?
* Visual similarity search? Given an image, suggest other similar images.
Kaggle dataset identifier: fashion-product-images-dataset
<jupyter_script># ignore warnings
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
import tensorflow as tf
# data generator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# wandb
import wandb
from wandb.keras import WandbCallback
DATA_PATH = "/kaggle/input/fashion-product-images-dataset/"
# # Exploratory and Visualizations
# ## Images Dataframe
images_df = pd.read_csv(os.path.join(DATA_PATH, "fashion-dataset", "images.csv"))
images_df.head(2)
# ## Product Meta Data Dataframe
styles_df = pd.read_csv(
os.path.join(DATA_PATH, "fashion-dataset", "styles.csv"), on_bad_lines="skip"
)
styles_df.head(2)
# ## Create Unique ID in both Dataframes
images_df.head(2)
# create an id column to merge with the metadata df
images_df["id"] = (
images_df["filename"]
.apply(lambda filename: filename.replace(".jpg", ""))
.astype(int)
)
images_df.head(2)
# ## Merging the two dataframes
data = styles_df.merge(images_df, on="id", how="left")
data.head(2)
# convert filename into a full file path
data["filename"] = data["filename"].apply(
lambda filename: os.path.join(DATA_PATH, "fashion-dataset", "images", filename)
)
data.head(2)
# ## Removing Products for which images are not present
# list the image files available in the dataset
image_files = os.listdir(os.path.join(DATA_PATH, "fashion-dataset", "images"))
print(len(image_files))
# flag which files actually exist in the dataset
data["file_found"] = data["id"].apply(lambda idx: "{}.jpg".format(idx) in image_files)
data["file_found"].value_counts()
# drop rows whose image file is missing from the dataset
data = data[data["file_found"]].reset_index(drop=True)
print(data.shape)
data.head(2)
# ## Checking for Missing data
data.isnull().sum()
# ## Visualizations
# visualize a few random images from the dataset
def display_image(image_files):
random.shuffle(image_files)
for idx, image_file in enumerate(image_files[0:9]):
plt.subplot(3, 3, idx + 1)
image_path = os.path.join(DATA_PATH, "fashion-dataset", "images", image_file)
image_arr = cv2.imread(image_path)
image_arr = cv2.cvtColor(image_arr, cv2.COLOR_BGR2RGB)
plt.imshow(image_arr)
plt.axis("off")
display_image(image_files)
# masterCategory count
gr_data_masterCate = data.groupby("masterCategory").size()
gr_data_masterCate_sorted = gr_data_masterCate.sort_values()
gr_data_masterCate_sorted
plt.figure(figsize=(10, 4))
with plt.rc_context({"ytick.color": "darkgrey"}):
plt.barh(
gr_data_masterCate_sorted.index,
gr_data_masterCate_sorted.values,
color="lightblue",
)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.ylabel("$CATEGORIES$", size=15, color="darkgrey")
plt.xlabel("Number of Image", size=15, color="darkgrey")
plt.show()
# subCategory count
gr_data_subCate = data.groupby("subCategory").size()
gr_data_subCate_sorted = gr_data_subCate.sort_values()
len(gr_data_subCate_sorted)
plt.figure(figsize=(10, 10))
with plt.rc_context({"ytick.color": "darkgrey"}):
plt.barh(
gr_data_subCate_sorted[-25:].index,
gr_data_subCate_sorted[-25:].values,
color="lightblue",
)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.ylabel("$CATEGORIES$", size=15, color="darkgrey")
plt.xlabel("Number of Image", size=15, color="darkgrey")
plt.show()
# articleType count
gr_data_articleType = data.groupby("articleType").size()
gr_data_articleType_sorted = gr_data_articleType.sort_values()
plt.figure(figsize=(10, 10))
with plt.rc_context({"ytick.color": "darkgrey"}):
plt.barh(
gr_data_articleType_sorted[-25:].index,
gr_data_articleType_sorted[-25:].values,
color="lightblue",
)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.ylabel("$CATEGORIES$", size=15, color="darkgrey")
plt.xlabel("Number of Image", size=15, color="darkgrey")
plt.show()
# ## final data
# keep the 20 largest sub-categories
categoricals = sorted(list(gr_data_subCate_sorted.index[-20:]))
data_20 = data[data["subCategory"].isin(categoricals)]
data_20 = data_20[["subCategory", "filename"]]
data_20
data_20.groupby("subCategory").size().sort_values(ascending=False)
# take at most 600 images per category
from sklearn.utils import resample, shuffle
from sklearn.model_selection import train_test_split
n_samples = 600
lst_df = []
for categorical in categoricals:
df_class_tmp = data_20.loc[data_20["subCategory"] == categorical]
if df_class_tmp.shape[0] < n_samples:
df_resample_tmp = df_class_tmp
else:
df_resample_tmp = resample(df_class_tmp, n_samples=n_samples, random_state=42)
lst_df.append(df_resample_tmp)
df = pd.concat(lst_df)
cate = df.groupby("subCategory").size().sort_values()
# plt.figure(figsize = (10, 10))
with plt.rc_context({"ytick.color": "darkgrey"}):
plt.barh(cate[-25:].index, cate[-25:].values, color="lightblue")
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.ylabel("$CATEGORIES$", size=15, color="darkgrey")
plt.xlabel("Number of Image", size=15, color="darkgrey")
plt.show()
df.shape
df = shuffle(df, random_state=42)
df = df.reset_index(drop=True)
df.rename({"subCategory": "categorical"}, axis=1, inplace=True)
# final data
data = df
data
# # Train-val-test Split
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(
data, test_size=0.2, random_state=42, stratify=data["categorical"]
)
valid_df, test_df = train_test_split(
test_df, test_size=0.5, random_state=42, stratify=test_df["categorical"]
)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
test_df = test_df.reset_index(drop=True)
train_df
# ## Data Augmentation
datagen = ImageDataGenerator(
rescale=1 / 255.0, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
train_generator = datagen.flow_from_dataframe(
dataframe=train_df,
target_size=(224, 224),
x_col="filename",
y_col="categorical",
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=42,
)
test_datagen = ImageDataGenerator(rescale=1 / 255.0)
valid_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
target_size=(224, 224),
x_col="filename",
y_col="categorical",
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=42,
)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test_df,
x_col="filename",
y_col="categorical",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
shuffle=True,
seed=42,
)
# tmp
from keras.models import Sequential
from keras.layers import (
Conv2D,
MaxPooling2D,
Dense,
Flatten,
Dropout,
BatchNormalization,
)
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
def build_model(name, weights_path=None):
base_model = VGG19(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
for layer in base_model.layers:
layer.trainable = False
x = base_model.output
x = Flatten()(x)
x = Dense(4096, activation="leaky_relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.4)(x)
x = Dense(1024, activation="sigmoid")(x)
x = BatchNormalization()(x)
x = Dropout(0.4)(x)
predictions = Dense(20, activation="softmax")(x)
model = Model(name=name, inputs=base_model.input, outputs=predictions)
if weights_path:
model.load_weights(weights_path)
return model
import time
NAME = "vgg19-{}".format(int(time.time()))
model = build_model(NAME)
model.summary()
lr = 0.01
epochs = 10
## Initialize the wandb project
wandb.init(
project="CBIR-fashion product dataset",
name=NAME,
config={
"learning_rate": lr,
"Batch_normalization": True,
"Batch_size": 64,
"Dropout": "0.4",
"architecture": "VGG19",
"dataset": "fashion-product-images-dataset",
"epochs": epochs,
"data generator": True,
},
)
wandb_callback = WandbCallback()
filepath = "{}_loss_opti.hdf5".format(NAME)
checkpoint1 = tf.keras.callbacks.ModelCheckpoint(
filepath,
monitor="val_loss",
verbose=1,
save_best_only=True,
save_weights_only=False,
mode="auto",
save_freq="epoch",
)
model.compile(
loss="categorical_crossentropy",
optimizer=Adam(learning_rate=lr),
metrics=["accuracy"],
)
history = model.fit_generator(
    train_generator,
    validation_data=valid_generator,  # validate on the validation split, not on the training data
    steps_per_epoch=train_generator.n // train_generator.batch_size,
    validation_steps=valid_generator.n // valid_generator.batch_size,
    epochs=epochs,
    callbacks=[checkpoint1, wandb_callback],
)
# model.save(filepath)
score = model.evaluate_generator(test_generator)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
from tensorflow.keras.models import load_model
best_model = load_model(filepath)
score = best_model.evaluate_generator(test_generator)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
IMAGESIZE = 224
CHANNELS = 3
def image_preprocess(image_path):
image_orig = cv2.imread(image_path)
image_arr = cv2.cvtColor(image_orig, cv2.COLOR_BGR2RGB)
image_arr = cv2.resize(image_arr, (IMAGESIZE, IMAGESIZE))
image_arr = image_arr / 255.0
image_arr = image_arr.reshape(-1, IMAGESIZE, IMAGESIZE, CHANNELS)
return image_arr
test_df.filename[1]
anchor_path = (
"/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/50892.jpg"
)
pos_path = (
"/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/45986.jpg"
)
neg_path = (
"/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/29863.jpg"
)
image_arr1 = image_preprocess(anchor_path)
image_arr2 = image_preprocess(pos_path)
image_arr3 = image_preprocess(neg_path)
image_lst = [image_arr1, image_arr2, image_arr3]
for i, image_arr in enumerate(image_lst):
plt.subplot(1, 3, i + 1)
plt.imshow(image_arr[0])
plt.axis(False)
plt.show()
# predict on the last preprocessed image (the loop above leaves image_arr pointing at image_arr3)
y_hat = model.predict(image_arr3)
y_hat.argmax()
categoricals[14]
best_model.layers
CBIR_model = Model(inputs=best_model.input, outputs=best_model.layers[-4].output)
prehashcode1 = CBIR_model.predict(image_arr1)
prehashcode2 = CBIR_model.predict(image_arr2)
prehashcode3 = CBIR_model.predict(image_arr3)
prehashcode1.shape
hashcode1 = np.where(prehashcode1 < 0.5, 0, 1)
hashcode2 = np.where(prehashcode2 < 0.5, 0, 1)
hashcode3 = np.where(prehashcode3 < 0.5, 0, 1)
hashcode1 = hashcode1.astype("bool")
hashcode2 = hashcode2.astype("bool")
hashcode3 = hashcode3.astype("bool")
# Hamming distance = number of bit positions where two hash codes differ
hamming_dist = np.count_nonzero(hashcode1 != hashcode2)
hamming_dist
hamming_dist = np.count_nonzero(hashcode1 != hashcode3)
hamming_dist
hamming_dist = np.count_nonzero(hashcode2 != hashcode3)
hamming_dist
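# Hedged sketch (my addition, not code from the original notebook): one way these binary hash codes
# could back a simple CBIR lookup is to hash a small gallery of images and return the k gallery images
# closest to a query in Hamming distance. `hash_image` and `retrieve` are illustrative helper names.
def hash_image(path):
    code = CBIR_model.predict(image_preprocess(path))
    return np.where(code < 0.5, 0, 1).astype(bool)[0]

gallery_paths = test_df["filename"].head(50).tolist()
gallery_codes = np.stack([hash_image(p) for p in gallery_paths])

def retrieve(query_path, k=5):
    query_code = hash_image(query_path)
    dists = np.count_nonzero(gallery_codes != query_code, axis=1)
    order = np.argsort(dists)[:k]
    return [(gallery_paths[i], int(dists[i])) for i in order]

print(retrieve(anchor_path, k=5))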
||
129310337
|
import pandas as pd
import numpy as np
# Load a dataset into a Pandas DataFrame
train_proteins = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
train_peptides = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv"
)
train_clinical = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
train_clinical.shape
train_clinical.head()
train_clinical.describe()
||
129284284
|
<jupyter_start><jupyter_text>Credit Card Fraud Detection
Context
---------
It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
Content
---------
The dataset contains transactions made by credit cards in September 2013 by European cardholders.
This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
Update (03/05/2021)
---------
A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.
Acknowledgements
---------
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.
More details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project
Please cite the following works:
Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon
Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. [Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE
Dal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)
Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier
Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing
Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019
Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019
Yann-Aël Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook)
Bertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Oblé, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics
[1]: https://www.researchgate.net/project/Fraud-detection-5
[2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/
[3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification
[4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective
[5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy
[6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf
[7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark
[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection
Kaggle dataset identifier: creditcardfraud
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.covariance import EllipticEnvelope
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.model_selection import cross_val_score, ShuffleSplit, cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity
pd.set_option("display.precision", 2)
scores = ["precision", "recall"]
def print_dataframe(filtered_cv_results):
"""Pretty print for filtered dataframe"""
for mean_precision, std_precision, mean_recall, std_recall, params in zip(
filtered_cv_results["mean_test_precision"],
filtered_cv_results["std_test_precision"],
filtered_cv_results["mean_test_recall"],
filtered_cv_results["std_test_recall"],
filtered_cv_results["params"],
):
print(
f"precision: {mean_precision:0.3f} (±{std_precision:0.03f}),"
f" recall: {mean_recall:0.3f} (±{std_recall:0.03f}),"
f" for {params}"
)
print()
def refit_strategy(cv_results):
"""Define the strategy to select the best estimator.
The strategy defined here is to filter-out all results below a precision threshold
of 0.96, rank the remaining by recall and keep all models with one standard
deviation of the best by recall. Once these models are selected, we can select the
fastest model to predict.
Parameters
----------
cv_results : dict of numpy (masked) ndarrays
CV results as returned by the `GridSearchCV`.
Returns
-------
best_index : int
The index of the best estimator as it appears in `cv_results`.
"""
scores = ["precision", "recall"]
# print the info about the grid-search for the different scores
precision_threshold = 0.96
cv_results_ = pd.DataFrame(cv_results)
print("All grid-search results:")
print_dataframe(cv_results_)
# Filter-out all results below the threshold
high_precision_cv_results = cv_results_[
cv_results_["mean_test_precision"] > precision_threshold
]
print(f"Models with a precision higher than {precision_threshold}:")
print_dataframe(high_precision_cv_results)
high_precision_cv_results = high_precision_cv_results[
[
"mean_score_time",
"mean_test_recall",
"std_test_recall",
"mean_test_precision",
"std_test_precision",
"rank_test_recall",
"rank_test_precision",
"params",
]
]
# Select the most performant models in terms of recall
# (within 1 sigma from the best)
best_recall_std = high_precision_cv_results["mean_test_recall"].std()
best_recall = high_precision_cv_results["mean_test_recall"].max()
best_recall_threshold = best_recall - best_recall_std
high_recall_cv_results = high_precision_cv_results[
high_precision_cv_results["mean_test_recall"] > best_recall_threshold
]
print(
"Out of the previously selected high precision models, we keep all the\n"
"the models within one standard deviation of the highest recall model:"
)
print_dataframe(high_recall_cv_results)
# From the best candidates, select the fastest model to predict
fastest_top_recall_high_precision_index = high_recall_cv_results[
"mean_score_time"
].idxmin()
print(
"\nThe selected final model is the fastest to predict out of the previously\n"
"selected subset of best models based on precision and recall.\n"
"Its scoring time is:\n\n"
f"{high_recall_cv_results.loc[fastest_top_recall_high_precision_index]}"
)
return fastest_top_recall_high_precision_index
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv")
df.head()
print("Dataset size - ", df.shape)
df = df.drop(columns="Time")
print("Duplicates in the data set - ", df.duplicated().sum())
df = df.drop_duplicates()
print("Dataset Final Size - ", df.shape)
df.info()
class_freq = df["Class"].value_counts()
print("Overview of distribution of Fraud and non-fraud entries in Data.")
print(class_freq)
print("% Fraud cases in the dataset - ", class_freq[1] / class_freq[0])
df.describe(percentiles=[0.01, 0.99]).T
feature_selector = SelectKBest(mutual_info_classif)
feature_selected = feature_selector.fit_transform(df.drop(columns="Class"), df["Class"])
features = feature_selector.get_feature_names_out()
sns.pairplot(data=df[features])
df = df.sample(frac=1, random_state=38)
frauds = df.loc[df["Class"] == 1]
no_frauds = df.loc[df["Class"] == 0][: frauds.shape[0]]
norm_distributed_df = pd.concat([frauds, no_frauds], axis=0)
new_df = norm_distributed_df.sample(frac=1, random_state=38)
new_df = new_df.drop_duplicates()
new_df.shape
X = new_df.loc[:, features]
outlier_detector = EllipticEnvelope(contamination=0.05)
outlier_detector.fit(X)
indices = outlier_detector.predict(X)
bool_indices = np.where(indices == 1, True, False)
X = new_df.loc[bool_indices, features]
y = new_df.loc[bool_indices, "Class"]
print("After Removing the outliers we have %s data instances", X.shape)
# # Clustering
km = KMeans(n_clusters=2, n_init=10, random_state=39).fit(X)
cluster = km.predict(X)
print(classification_report(cluster, y))
tc = km.fit_transform(X)
sns.scatterplot(data=tc, x=tc[:, 0], y=tc[:, 1], hue=y)
# # Model Building
ss = StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39)
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
# ## Undersample - SVC
tuned_parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
grid_search = GridSearchCV(
SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy
)
grid_search.fit(X_train, y_train)
grid_search.best_params_
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
undersample_svc_model = grid_search.best_estimator_
# ## Undersample - RFC
tuned_parameters = [{"max_depth": [4, 6, 8]}]
grid_search = GridSearchCV(
RandomForestClassifier(max_samples=0.8, random_state=53),
tuned_parameters,
scoring=scores,
refit=refit_strategy,
)
grid_search.fit(X_train, y_train)
grid_search.best_params_
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
undersample_rfc = grid_search.best_estimator_
# # OverSampling
params = {"bandwidth": np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(df.loc[df["Class"] == 1])
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
latest_data = kde.sample(1000, random_state=10)
latest_df = pd.DataFrame(latest_data, columns=df.columns)
latest_df["Class"] = 1
latest_df = latest_df[latest_df["Amount"] > 0]
oversample = pd.concat([latest_df, df], axis=0)
oversample = oversample.sample(frac=1, random_state=38)
frauds = oversample.loc[oversample["Class"] == 1]
no_frauds = oversample.loc[oversample["Class"] == 0][: frauds.shape[0]]
norm_distributed_df = pd.concat([frauds, no_frauds], axis=0)
oversample_new_df = norm_distributed_df.sample(frac=1, random_state=38)
feature_selector = SelectKBest(mutual_info_classif)
feature_selected = feature_selector.fit_transform(
oversample_new_df.drop(columns="Class"), oversample_new_df["Class"]
)
features = feature_selector.get_feature_names_out()
X = oversample_new_df.loc[:, features]
outlier_detector = EllipticEnvelope(contamination=0.1)
outlier_detector.fit(X)
indices = outlier_detector.predict(X)
bool_indices = np.where(indices == 1, True, False)
X = X.loc[bool_indices]
y = oversample_new_df.loc[bool_indices, "Class"]
# # Model Building
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39)
ss1 = StandardScaler()
X_train = ss1.fit_transform(X_train)
X_test = ss1.transform(X_test)
# ## SVC
tuned_parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
grid_search = GridSearchCV(
SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy
)
grid_search.fit(X_train, y_train)
grid_search.best_params_
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
oversample_svc_model = grid_search.best_estimator_
# ## RFC
tuned_parameters = [{"max_depth": [4, 6, 8]}]
grid_search = GridSearchCV(
RandomForestClassifier(max_samples=0.8, random_state=53),
tuned_parameters,
scoring=scores,
refit=refit_strategy,
)
grid_search.fit(X_train, y_train)
grid_search.best_params_
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
oversample_rfc = grid_search.best_estimator_
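# Not in the original notebook: a quick side-by-side of the two estimators fitted on the
# oversampled split, re-using the held-out test set prepared above (a hedged usage sketch,
# not the author's evaluation).
for name, est in [
    ("oversample SVC", oversample_svc_model),
    ("oversample RFC", oversample_rfc),
]:
    print(name)
    print(classification_report(y_test, est.predict(X_test)))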
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284284.ipynb
|
creditcardfraud
| null |
[{"Id": 129284284, "ScriptId": 38357055, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3687805, "CreationDate": "05/12/2023 12:48:59", "VersionNumber": 1.0, "Title": "Credit Card fraud detection", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 327.0, "LinesInsertedFromPrevious": 327.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185187993, "KernelVersionId": 129284284, "SourceDatasetVersionId": 23498}]
|
[{"Id": 23498, "DatasetId": 310, "DatasourceVersionId": 23502, "CreatorUserId": 998023, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "03/23/2018 01:17:27", "VersionNumber": 3.0, "Title": "Credit Card Fraud Detection", "Slug": "creditcardfraud", "Subtitle": "Anonymized credit card transactions labeled as fraudulent or genuine", "Description": "Context\n---------\n\nIt is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.\n\nContent\n---------\n\nThe dataset contains transactions made by credit cards in September 2013 by European cardholders. \nThis dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.\n\nIt contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise. \n\nGiven the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.\n\nUpdate (03/05/2021)\n---------\n\nA simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.\n\nAcknowledgements\n---------\n\nThe dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00e9 Libre de Bruxelles) on big data mining and fraud detection.\nMore details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project\n\nPlease cite the following works: \n\nAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n\nDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon\n\nDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. 
[Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n\nDal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)\n\nCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier\n\nCarcillo, Fabrizio; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n\nBertrand Lebichot, Yann-A\u00ebl Le Borgne, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n\nFabrizio Carcillo, Yann-A\u00ebl Le Borgne, Olivier Caelen, Frederic Obl\u00e9, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019\n\nYann-A\u00ebl Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook) \n\nBertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics\n\n [1]: https://www.researchgate.net/project/Fraud-detection-5\n [2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/\n [3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification\n [4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective\n [5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy\n [6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf\n [7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark\n \n[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection", "VersionNotes": "Fixed preview", "TotalCompressedBytes": 150828752.0, "TotalUncompressedBytes": 69155632.0}]
|
[{"Id": 310, "CreatorUserId": 14069, "OwnerUserId": NaN, "OwnerOrganizationId": 1160.0, "CurrentDatasetVersionId": 23498.0, "CurrentDatasourceVersionId": 23502.0, "ForumId": 1838, "Type": 2, "CreationDate": "11/03/2016 13:21:36", "LastActivityDate": "02/06/2018", "TotalViews": 10310781, "TotalDownloads": 564249, "TotalVotes": 10432, "TotalKernels": 4266}]
| null |
| false | 0 | 2,971 | 0 | 4,845 | 2,971 |
||
129284945
|
<jupyter_start><jupyter_text>german-traffic-signs
### Context
Detecting street signs is one of the most important tasks in self-driving cars. This dataset is a benchmark for street signs and symbols, covering 43 different classes. Classifying road symbols using a deep convolutional neural network is the aim of this dataset.
Kaggle dataset identifier: germantrafficsigns
<jupyter_script>import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# importing pickle module to unpickle the files in the dataset
import pickle
with open("/kaggle/input/germantrafficsigns/train.p", "rb") as f:
train_data = pickle.load(f)
with open("/kaggle/input/germantrafficsigns/test.p", "rb") as f:
test_data = pickle.load(f)
with open("/kaggle/input/germantrafficsigns/valid.p", "rb") as f:
val_data = pickle.load(f)
# splitting the data into our variables
x_train, y_train = train_data["features"], train_data["labels"]
x_test, y_test = test_data["features"], test_data["labels"]
x_val, y_val = val_data["features"], val_data["labels"]
# showing the size of each variable we have
print(x_train.shape)
print(x_test.shape)
print(x_val.shape)
import pandas as pd
data = pd.read_csv("/kaggle/input/germantrafficsigns/signnames.csv")
print(data)
# from the dataframe printed above we come to know that the dataset has 43 classes.
# import matplotlib for visualizing images
import matplotlib.pyplot as plt
# the block of code below just displays an image from our data
plt.imshow(x_train[0])
print(x_train[0].shape)
# converting images to grayscale so that the neural network can learn the patterns more easily
import cv2
def gray(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
# equalizing images to make the features in the images more prominent for the model to understand
def equalize(img):
img = cv2.equalizeHist(img)
return img
def preprocessing(img):
img = gray(img)
img = equalize(img)
# now normalizing the images
img = img / 255
return img
# using the map function to iterate through the whole dataset and apply our preprocessing function to every image
import numpy as np
x_train = np.array(list(map(preprocessing, x_train)))
x_val = np.array(list(map(preprocessing, x_val)))
x_test = np.array(list(map(preprocessing, x_test)))
# showing the new preprocessed images
plt.imshow(x_train[0])
print(x_train[0].shape)
# converting the labels into categorical variables
from keras.utils.np_utils import to_categorical
y_cat_train = to_categorical(y_train, 43)
y_cat_test = to_categorical(y_test, 43)
y_cat_val = to_categorical(y_val, 43)
# reshaping the images
x_train = x_train.reshape(34799, 32, 32, 1)
x_test = x_test.reshape(12630, 32, 32, 1)
x_val = x_val.reshape(4410, 32, 32, 1)
print(x_train.shape)
# importing keras and required layers to create the model
import keras
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense
from keras.layers import Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D  # importing from keras.layers directly works across Keras versions
# create model
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=(32, 32, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(15, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(43, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
print(model.summary())
model.fit(x_train, y_cat_train, epochs=20, batch_size=400, verbose=1, shuffle=1)
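# Note (not in the original notebook): the validation split prepared earlier
# (x_val, y_cat_val) is never used during training; if desired it could be passed in, e.g.:
# model.fit(x_train, y_cat_train, validation_data=(x_val, y_cat_val), epochs=20, batch_size=400, verbose=1)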
from sklearn.metrics import classification_report
prediction = np.argmax(model.predict(x_test), axis=1)  # predict_classes was removed in newer Keras; argmax over the softmax output gives the class ids
print(classification_report(y_test, prediction))
model.save("street_signs.h5")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284945.ipynb
|
germantrafficsigns
|
saadhaxxan
|
[{"Id": 129284945, "ScriptId": 16489704, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7209429, "CreationDate": "05/12/2023 12:53:50", "VersionNumber": 1.0, "Title": "Street-Signs-and-boards-classification-using-DCNN", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 135.0, "LinesInsertedFromPrevious": 135.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185189021, "KernelVersionId": 129284945, "SourceDatasetVersionId": 976937}]
|
[{"Id": 976937, "DatasetId": 533907, "DatasourceVersionId": 1005266, "CreatorUserId": 2667524, "LicenseName": "CC0: Public Domain", "CreationDate": "02/29/2020 09:05:32", "VersionNumber": 1.0, "Title": "german-traffic-signs", "Slug": "germantrafficsigns", "Subtitle": "Traffic Signs Dataset for Classification", "Description": "### Context\n\nDetecting Street Signs is one of the most important tasks in Self Driving Cars.This dataset is a benchmark in Street signs and symbols including 43 different classes. Classifying road symbols using Deep Convolutional Neural Network is the aim of this dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 533907, "CreatorUserId": 2667524, "OwnerUserId": 2667524.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 976937.0, "CurrentDatasourceVersionId": 1005266.0, "ForumId": 547357, "Type": 2, "CreationDate": "02/29/2020 09:05:32", "LastActivityDate": "02/29/2020", "TotalViews": 6704, "TotalDownloads": 672, "TotalVotes": 7, "TotalKernels": 4}]
|
[{"Id": 2667524, "UserName": "saadhaxxan", "DisplayName": "Saad Hassan", "RegisterDate": "01/03/2019", "PerformanceTier": 1}]
|
| false | 1 | 1,151 | 0 | 1,237 | 1,151 |
||
129284646
|
<jupyter_start><jupyter_text>Food and their calories
### Context
The dataset consists of foods such as soup, ice cream, pizza, vegetables, fruits, etc., along with the serving size for which the calories are calculated.
### Content
There are three columns in this dataset: Food, Serving, Calories.
### Inspiration
We have always wanted to prepare a diet chart based on calories. This is the dataset for that. 😄
Kaggle dataset identifier: food-and-their-calories
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import random
import re
def extract_numeric_calories(calories_string):
numeric_calories = re.findall(r"\d+", calories_string)
if numeric_calories:
return int(numeric_calories[0])
else:
return 0
def suggest_foods(predicted_calories, food_dataset, num_suggestions=5):
predicted_calories_numeric = extract_numeric_calories(predicted_calories)
filtered_foods = food_dataset[
food_dataset["Calories"].apply(extract_numeric_calories)
== predicted_calories_numeric
]
if len(filtered_foods) >= num_suggestions:
suggestions = random.sample(list(filtered_foods["Food"]), num_suggestions)
return suggestions
elif len(filtered_foods) > 0:
suggestions = list(filtered_foods["Food"])
return suggestions
else:
return []
predicted_calories = "60 cals"
food_dataset = pd.read_csv(
"/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv"
)
suggested_foods = suggest_foods(predicted_calories, food_dataset)
if suggested_foods:
print("Suggested foods for", predicted_calories, "calories:")
for food in suggested_foods:
print(food)
else:
print("No food options available for the specified calorie value.")
import pandas as pd
import re
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import NearestNeighbors
food_df = pd.read_csv(
"/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv"
)
food_df["Calories"] = (
food_df["Calories"].apply(lambda x: re.findall(r"\d+", x)[0]).astype(float)
)
scaler = MinMaxScaler()
food_df["Calories"] = scaler.fit_transform(food_df["Calories"].values.reshape(-1, 1))
# the raw target value has to be put on the same scale as the "Calories" column;
# otherwise the comparison below can never match anything after MinMax scaling
predicted_calories = 4
predicted_scaled = scaler.transform([[predicted_calories]])[0][0]
# keep foods whose scaled calorie value falls within a small tolerance of the target
tolerance = 0.05
filtered_foods = food_df[
    (food_df["Calories"] >= predicted_scaled - tolerance)
    & (food_df["Calories"] <= predicted_scaled + tolerance)
]
if len(filtered_foods) > 0:
    # "Serving" is a free-text column, so the numeric "Calories" value is used as the
    # feature for the nearest-neighbour search (instead of a cosine-similarity matrix,
    # which cannot be computed on strings)
    food_vectors = filtered_foods[["Calories"]].values
    k = min(5, len(filtered_foods))
    nn = NearestNeighbors(n_neighbors=k)
    nn.fit(food_vectors)
    top_food_indices = nn.kneighbors([[predicted_scaled]], return_distance=False)
    recommended_foods = filtered_foods.iloc[top_food_indices[0]]
    for _, food in recommended_foods.iterrows():
        print(f"Food: {food['Food']}")
        print(f"Calories: {food['Calories']}")
        print()
else:
    print("No foods found within the desired calorie range.")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284646.ipynb
|
food-and-their-calories
|
vaishnavivenkatesan
|
[{"Id": 129284646, "ScriptId": 38343193, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9946883, "CreationDate": "05/12/2023 12:51:41", "VersionNumber": 1.0, "Title": "notebook20825d17fb", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 95.0, "LinesInsertedFromPrevious": 95.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185188691, "KernelVersionId": 129284646, "SourceDatasetVersionId": 1588653}]
|
[{"Id": 1588653, "DatasetId": 937303, "DatasourceVersionId": 1623908, "CreatorUserId": 5592707, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "10/24/2020 11:45:12", "VersionNumber": 1.0, "Title": "Food and their calories", "Slug": "food-and-their-calories", "Subtitle": "Variety of food and their calories based on serving", "Description": "### Context\n\nThe data set consist of food such as soup,ice-cream,pizza,vegetables,fruits etc, the serving for which the calories are calculated.\n\n\n### Content\n\nThere are three columns for this dataset: Food Serving Calories\n\n\n### Inspiration\n\nWe would always wanted to prepare the diet chart based in the calories. This the dataset then.\ud83d\ude04", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 937303, "CreatorUserId": 5592707, "OwnerUserId": 5592707.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1588653.0, "CurrentDatasourceVersionId": 1623908.0, "ForumId": 953334, "Type": 2, "CreationDate": "10/24/2020 11:45:12", "LastActivityDate": "10/24/2020", "TotalViews": 22731, "TotalDownloads": 2284, "TotalVotes": 28, "TotalKernels": 3}]
|
[{"Id": 5592707, "UserName": "vaishnavivenkatesan", "DisplayName": "Vaishnavi", "RegisterDate": "08/08/2020", "PerformanceTier": 2}]
|
| false | 1 | 932 | 0 | 1,052 | 932 |
||
129284987
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
df
game_over_ten_thousand = df[df["Global_Sales"] > 0.01]
game_over_ten_thousand
# # Which company is the most common video game publisher?
video_game_publisher = game_over_ten_thousand["Publisher"].mode()
video_game_publisher[0]
# # What’s the most common platform?
video_game_platform = game_over_ten_thousand["Platform"].mode()
video_game_platform[0]
# # What about the most common genre?
video_game_genre = game_over_ten_thousand["Genre"].mode()
video_game_genre[0]
# # What are the top 20 highest grossing games?
highest_grossing_games = df.nlargest(20, "Global_Sales")
highest_grossing_games
# # For North American video game sales, what’s the median?
# ### Provide a secondary output showing ten games surrounding the median sales output
# ### Assume that games with same median value are sorted in descending order.
#
sorted_NA = game_over_ten_thousand.sort_values(["NA_Sales"], ascending=False)
median = game_over_ten_thousand["NA_Sales"].median()
mid_row = len(sorted_NA) // 2
ten_games = sorted_NA.iloc[mid_row - 5 : mid_row + 5]  # ten rows centred on the median position
print("the median is: ", median)
ten_games
# # For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
max_Global_Sales = game_over_ten_thousand["Global_Sales"].max()
row_for_max_global_sales = game_over_ten_thousand[
game_over_ten_thousand["Global_Sales"] == max_Global_Sales
]
max_NA_sales = row_for_max_global_sales["NA_Sales"]
max_NA_sales_value = max_NA_sales.iloc[0]  # positional indexing is safer than relying on the row label being 0
max_NA_sales_value
mean_sales_NA = game_over_ten_thousand["NA_Sales"].mean()
mean_sales_NA
std_for_sales_NA = game_over_ten_thousand["NA_Sales"].std()
std_for_sales_NA
result = (max_NA_sales_value - mean_sales_NA) / std_for_sales_NA
result
# # The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?
filter_platforms = game_over_ten_thousand.groupby("Platform")["Global_Sales"]
filter_platforms
average_of_Wii = filter_platforms.mean()["Wii"]
average_of_averages = filter_platforms.mean().mean()
average_of_averages
if average_of_Wii > average_of_averages:
    print(
        f"the average for Wii is {average_of_Wii}, which is higher than the average across all platforms"
    )
else:
    print(
        f"the average for Wii is {average_of_Wii}, which is lower than the average across all platforms"
    )
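# Not in the original notebook: for a fuller comparison, the per-platform averages can be
# ranked directly using the grouped object defined above, e.g.:
filter_platforms.mean().sort_values(ascending=False).head(10)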
# # Come up with 3 more questions that can be answered with this data set.
# ### What is the most common genre in the EU?
#
genre_groups = game_over_ten_thousand.groupby("Genre")
genre_groups["EU_Sales"].count().sort_values(ascending=False).head(1)
# ### What are the top 10 highest-selling games in the EU?
#
highest_grossing_games = df.nlargest(10, "EU_Sales")
highest_grossing_games
# ### Which publisher has the lowest total sales in the EU?
#
lowest_eu_publisher = df.groupby("Publisher")["EU_Sales"].sum().sort_values().head(1)
lowest_eu_publisher
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284987.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129284987, "ScriptId": 38345080, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15001180, "CreationDate": "05/12/2023 12:54:07", "VersionNumber": 2.0, "Title": "notebook62d2995610", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 90.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185189082, "KernelVersionId": 129284987, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 1,167 | 0 | 2,281 | 1,167 |
129367548
|
<jupyter_start><jupyter_text>daun_jagung_dataset
Kaggle dataset identifier: daun-jagung-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
IMAGE_SIZE = 256
BATCH_SIZE = 32
CHANNELS = 3
EPOCHS = 40
dataset = tf.keras.preprocessing.image_dataset_from_directory(
"/kaggle/input/second-maize-dataset/corn disease",
shuffle=True,
image_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
)
class_name = dataset.class_names
class_name
import matplotlib.image as mpimg
for image_batch, labels_batch in dataset.take(1):
print(image_batch.shape)
print(labels_batch.numpy())
# **VISUALIZING A FEW IMAGES FROM THE DATA**
plt.figure(figsize=(15, 15))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_name[labels_batch[i]])
plt.axis("off")
# **Function to Split the Dataset**
# The dataset is split into 3 sets.
# 1. Training: the data used to train the model.
# 2. Validation: the data evaluated during training.
# 3. Test: the data used to evaluate the trained model.
len(dataset)
# 80% ==> training
# 20% ==> 10% validation, 10% test
train_size = 0.8
len(dataset) * train_size
train_ds = dataset.take(109)
len(train_ds)
test_ds = dataset.skip(109)
len(test_ds)
val_size = 0.1
len(dataset) * val_size
val_ds = test_ds.take(13)
len(val_ds)
test_ds = test_ds.skip(13)
len(test_ds)
def get_dataset_partitions_tf(
ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000
):
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)  # skip past val as well so the test split does not duplicate it
return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
len(train_ds)
len(val_ds)
len(test_ds)
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# **BUILDING THE MODEL**
# **creating layers for resizing and normalization**
# Before feeding images into the network, we have to resize them to the desired size. In addition, to improve model performance, we have to normalize the image pixel values
resize_and_rescale = tf.keras.Sequential(
[
layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        layers.experimental.preprocessing.Rescaling(1.0 / 255),  # scale pixel values from [0, 255] to [0, 1]
]
)
# **Data Augmentation**
# 1. RandomFlip: horizontal and vertical
# 2. RandomRotation: 0.2
data_augmentation = tf.keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
layers.experimental.preprocessing.RandomRotation(0.2),
]
)
from tensorflow.keras import activations
# **MODEL ARCHITECTURE**
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 4
model = models.Sequential(
[
resize_and_rescale,
data_augmentation,
layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
# # 1
# layers.Conv2D(64, kernel_size = (3,3), activation ='relu'),
# layers.MaxPooling2D((2,2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(64, activation="relu"),
layers.Dense(n_classes, activation="softmax"),
]
)
model.build(input_shape=input_shape)
model.summary()
# **Adam optimizer**
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["accuracy"],
)
history = model.fit(
train_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds
)
scores = model.evaluate(test_ds)
scores
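# evaluate() returns [loss, accuracy] given the compile() call above; an optional,
# more readable summary of the test scores.
test_loss, test_acc = scores
print(f"test loss: {test_loss:.4f} - test accuracy: {test_acc:.4f}")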
# **Plotting accuracy and loss**
history.params
history.history.keys()
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(range(EPOCHS), acc, label="Training Accuracy")
plt.plot(range(EPOCHS), val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(range(EPOCHS), loss, label="Training Loss")
plt.plot(range(EPOCHS), val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
# **Running a prediction on a single image**
for (
images_batch,
labels_batch,
) in test_ds.take(1):
first_image = images_batch[0].numpy().astype("uint8")
first_label = labels_batch[0].numpy()
print("first image to predict")
plt.imshow(first_image)
print("actual label:", class_name[first_label])
batch_prediction = model.predict(images_batch)
print("predicted label:", class_name[np.argmax(batch_prediction[0])])
# **Writing an inference function for image detection**
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)  # use the function argument, not the outer loop variable
img_array = tf.expand_dims(img_array, 0) # create a batch
predictions = model.predict(img_array)
predicted_class = class_name[np.argmax(predictions[0])]
confidence = round(100 * (np.max(predictions[0])), 2)
return predicted_class, confidence
# **Running inference on a batch of images**
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
predicted_class, confidence = predict(model, images[i].numpy())
actual_class = class_name[labels[i]]
plt.title(
f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%"
)
plt.axis("off")
# model_version=1
# model.save(f"../models/{model_version}")
import joblib
# joblib.dump(model,'knn_joblib')
filename = "model2"
model.save(filename)
# SAVE MODEL
filename = "model.h5"
model.save(filename)
# SAVE MODEL
# (note: model.save() with a ".tflite" extension still writes a regular Keras SavedModel,
# not a TFLite flatbuffer -- the TFLiteConverter further below handles that conversion)
filename = "model.tflite"
model.save(filename)
# SAVE MODEL
filename = "modeltf.tflite"
model.save(filename)
import tensorflow as tf
tf.keras.models.save_model(model, "model.pbtxt")
converter = tf.lite.TFLiteConverter.from_keras_model(model=model)
model_tflite = converter.convert()
open("MachineLearningModel.tflite", "wb").write(model_tflite)
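# A minimal sketch of how the flatbuffer written above could be loaded and run with
# tf.lite.Interpreter (the file name and the dummy input are assumptions for illustration).
interpreter = tf.lite.Interpreter(model_path="MachineLearningModel.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()
print("TFLite output shape:", interpreter.get_tensor(output_details[0]["index"]).shape)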
# import tensorflow as tf
# # Convert the model
# converter = tf.lite.TFLiteConverter.from_saved_model(/kaggle/working/model.h5) # path to the SavedModel directory
# tflite_model = converter.convert()
# # Save the model.
# with open('modell.tflite', 'wb') as f:
# f.write(tflite_model)
# # Save the model.
# with open('model.tflite', 'wb') as f:
# f.write(tflite_model)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/367/129367548.ipynb
|
daun-jagung-dataset
|
andril22
|
[{"Id": 129367548, "ScriptId": 33547736, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9166171, "CreationDate": "05/13/2023 07:25:54", "VersionNumber": 1.0, "Title": "SimpleCnn_jadi", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 300.0, "LinesInsertedFromPrevious": 300.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185349007, "KernelVersionId": 129367548, "SourceDatasetVersionId": 4737965}]
|
[{"Id": 4737965, "DatasetId": 2741726, "DatasourceVersionId": 4800941, "CreatorUserId": 9166171, "LicenseName": "Unknown", "CreationDate": "12/18/2022 02:22:29", "VersionNumber": 1.0, "Title": "daun_jagung_dataset", "Slug": "daun-jagung-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2741726, "CreatorUserId": 9166171, "OwnerUserId": 9166171.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4737965.0, "CurrentDatasourceVersionId": 4800941.0, "ForumId": 2775198, "Type": 2, "CreationDate": "12/18/2022 02:22:29", "LastActivityDate": "12/18/2022", "TotalViews": 267, "TotalDownloads": 13, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 9166171, "UserName": "andril22", "DisplayName": "Andril22_", "RegisterDate": "12/14/2021", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
IMAGE_SIZE = 256
BATCH_SIZE = 32
CHANNELS = 3
EPOCHS = 40
dataset = tf.keras.preprocessing.image_dataset_from_directory(
"/kaggle/input/second-maize-dataset/corn disease",
shuffle=True,
image_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
)
class_name = dataset.class_names
class_name
import matplotlib.image as mpimg
for image_batch, labels_batch in dataset.take(1):
print(image_batch.shape)
print(labels_batch.numpy())
# **VISUALIZING A FEW IMAGES FROM THE DATA**
plt.figure(figsize=(15, 15))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_name[labels_batch[i]])
plt.axis("off")
# **Function to Split the Dataset**
# The dataset is split into 3 sets.
# 1. Training: the data used to train the model.
# 2. Validation: the data evaluated during training.
# 3. Test: the data used to evaluate the trained model.
len(dataset)
# 80% ==> training
# 20% ==> 10% validation, 10% test
train_size = 0.8
len(dataset) * train_size
train_ds = dataset.take(109)
len(train_ds)
test_ds = dataset.skip(109)
len(test_ds)
val_size = 0.1
len(dataset) * val_size
val_ds = test_ds.take(13)
len(val_ds)
test_ds = test_ds.skip(13)
len(test_ds)
def get_dataset_partitions_tf(
ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000
):
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)  # skip past val as well so the test split does not duplicate it
return train_ds, val_ds, test_ds
train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset)
len(train_ds)
len(val_ds)
len(test_ds)
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# **BUILDING THE MODEL**
# **creating layers for resizing and normalization**
# Before feeding images into the network, we have to resize them to the desired size. In addition, to improve model performance, we have to normalize the image pixel values
resize_and_rescale = tf.keras.Sequential(
[
layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        layers.experimental.preprocessing.Rescaling(1.0 / 255),  # scale pixel values from [0, 255] to [0, 1]
]
)
# **Data Augmentation**
# 1. RandomFlip: horizontal and vertical
# 2. RandomRotation: 0.2
data_augmentation = tf.keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
layers.experimental.preprocessing.RandomRotation(0.2),
]
)
from tensorflow.keras import activations
# **MODEL ARCHITECTURE**
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 4
model = models.Sequential(
[
resize_and_rescale,
data_augmentation,
layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
# # 1
# layers.Conv2D(64, kernel_size = (3,3), activation ='relu'),
# layers.MaxPooling2D((2,2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(64, activation="relu"),
layers.Dense(n_classes, activation="softmax"),
]
)
model.build(input_shape=input_shape)
model.summary()
# **Adam optimizer**
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["accuracy"],
)
history = model.fit(
train_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds
)
scores = model.evaluate(test_ds)
scores
# **Plotting accuracy and loss**
history.params
history.history.keys()
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(range(EPOCHS), acc, label="Training Accuracy")
plt.plot(range(EPOCHS), val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(range(EPOCHS), loss, label="Training Loss")
plt.plot(range(EPOCHS), val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
# **Running a prediction on a single image**
for (
images_batch,
labels_batch,
) in test_ds.take(1):
first_image = images_batch[0].numpy().astype("uint8")
first_label = labels_batch[0].numpy()
print("first image to predict")
plt.imshow(first_image)
print("actual label:", class_name[first_label])
batch_prediction = model.predict(images_batch)
print("predicted label:", class_name[np.argmax(batch_prediction[0])])
# **Writing an inference function for image detection**
def predict(model, img):
    img_array = tf.keras.preprocessing.image.img_to_array(img)  # use the function argument, not the outer loop variable
img_array = tf.expand_dims(img_array, 0) # create a batch
predictions = model.predict(img_array)
predicted_class = class_name[np.argmax(predictions[0])]
confidence = round(100 * (np.max(predictions[0])), 2)
return predicted_class, confidence
# **Running inference on a batch of images**
plt.figure(figsize=(15, 15))
for images, labels in test_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
predicted_class, confidence = predict(model, images[i].numpy())
actual_class = class_name[labels[i]]
plt.title(
f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%"
)
plt.axis("off")
# model_version=1
# model.save(f"../models/{model_version}")
import joblib
# joblib.dump(model,'knn_joblib')
filename = "model2"
model.save(filename)
# SAVE MODEL
filename = "model.h5"
model.save(filename)
# SAVE MODEL
# (note: model.save() with a ".tflite" extension still writes a regular Keras SavedModel,
# not a TFLite flatbuffer -- the TFLiteConverter further below handles that conversion)
filename = "model.tflite"
model.save(filename)
# SAVE MODEL
filename = "modeltf.tflite"
model.save(filename)
import tensorflow as tf
tf.keras.models.save_model(model, "model.pbtxt")
converter = tf.lite.TFLiteConverter.from_keras_model(model=model)
model_tflite = converter.convert()
open("MachineLearningModel.tflite", "wb").write(model_tflite)
# import tensorflow as tf
# # Convert the model
# converter = tf.lite.TFLiteConverter.from_saved_model(/kaggle/working/model.h5) # path to the SavedModel directory
# tflite_model = converter.convert()
# # Save the model.
# with open('modell.tflite', 'wb') as f:
# f.write(tflite_model)
# # Save the model.
# with open('model.tflite', 'wb') as f:
# f.write(tflite_model)
| false | 0 | 2,690 | 0 | 2,720 | 2,690 |
||
129367151
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import json
train_xl = pd.read_json(
"/kaggle/input/train-data/computers_train_xlarge.json", lines=True
)
train_xl.head()
# # **Are there null data in the dataset?**
#
print("Are there any null values?")
print(train_xl.isnull().values.any(), "\n")
# Find out how many - This gives you the breakdown per column
print("How many null values are in each column?"),
print(train_xl.isnull().sum(), "\n")
# Get total overall null values
print("How many null values are in the data in total?")
print(train_xl.isnull().sum().sum())
# #### Removing all null values would make the dataset too small for training the algorithm.
# I will remove some columns that have too many null values.
# Delete columns price_left' , 'price_right', 'specTableContent_left', 'specTableContent_right','keyValuePairs_left','keyValuePairs_right', 'id_left' , 'id_right', 'pair_id'
train_xl = train_xl.drop(
[
"price_left",
"price_right",
"specTableContent_left",
"specTableContent_right",
"keyValuePairs_left",
"keyValuePairs_right",
"id_left",
"id_right",
"pair_id",
],
axis=1,
)
train_xl = train_xl.dropna()
train_xl.head(2)
train_data = train_xl.drop(["label"], axis=1)
label = train_xl[["label"]]
import re
# Remove punctuation and apply case folding
def preprocessor(text):
text = re.sub("<[^>]*>", "", text)
emoticons = re.findall("(?::|;|=)(?:-_)?(?:\)|\(|D|P)", text)
text = re.sub("[\W]+", " ", text.lower()) + " ".join(emoticons).replace("-", "")
return text
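# A quick illustration on a made-up string: HTML tags are stripped, text is lower-cased,
# punctuation is removed, and any detected emoticons are re-appended with hyphens dropped.
print(preprocessor("<b>ThinkPad X1</b> 16GB RAM :)"))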
# apply the preprocessor to all description_left
train_data["category_left"] = train_data["category_left"].apply(preprocessor)
train_data["category_right"] = train_data["category_right"].apply(preprocessor)
train_data["brand_left"] = train_data["brand_left"].apply(preprocessor)
train_data["brand_right"] = train_data["brand_right"].apply(preprocessor)
train_data["description_left"] = train_data["description_left"].apply(preprocessor)
train_data["description_right"] = train_data["description_right"].apply(preprocessor)
train_data["title_left"] = train_data["title_left"].apply(preprocessor)
train_data["title_right"] = train_data["title_right"].apply(preprocessor)
train_data.head()
# Convert each text column to a NumPy array and store it back in the DataFrame
train_data["category_left"] = train_data["category_left"].to_numpy()
train_data["category_right"] = train_data["category_right"].to_numpy()
train_data["brand_left"] = train_data["brand_left"].to_numpy()
train_data["brand_right"] = train_data["brand_right"].to_numpy()
train_data["description_left"] = train_data["description_left"].to_numpy()
train_data["description_right"] = train_data["description_right"].to_numpy()
train_data["title_left"] = train_data["title_left"].to_numpy()
train_data["title_right"] = train_data["title_right"].to_numpy()
# view the data
train_data.head()
train_data["title_right"]
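# A hedged sketch of one possible next step (hypothetical, not taken from this notebook):
# vectorise the paired titles with TF-IDF and fit a logistic-regression matcher on `label`,
# as the notebook title suggests. The names below reuse the variables defined above.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
pair_text = train_data["title_left"] + " " + train_data["title_right"]
X = TfidfVectorizer(stop_words="english").fit_transform(pair_text)
X_train, X_test, y_train, y_test = train_test_split(
    X, label["label"], test_size=0.2, random_state=42
)
clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("hold-out accuracy:", clf.score(X_test, y_test))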
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/367/129367151.ipynb
| null | null |
[{"Id": 129367151, "ScriptId": 38239302, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7523258, "CreationDate": "05/13/2023 07:21:01", "VersionNumber": 2.0, "Title": "Product Matching Using Logistics Regression", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 91.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 43.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import json
train_xl = pd.read_json(
"/kaggle/input/train-data/computers_train_xlarge.json", lines=True
)
train_xl.head()
# # **Are there null data in the dataset?**
#
print("Are there any null values?")
print(train_xl.isnull().values.any(), "\n")
# Find out how many - This gives you the breakdown per column
print("How many null values are in each column?"),
print(train_xl.isnull().sum(), "\n")
# Get total overall null values
print("How many null values are in the data in total?")
print(train_xl.isnull().sum().sum())
# #### Removing all null values would make the dataset too small for training the algorithm.
# I will remove some columns that have too many null values.
# Delete columns price_left' , 'price_right', 'specTableContent_left', 'specTableContent_right','keyValuePairs_left','keyValuePairs_right', 'id_left' , 'id_right', 'pair_id'
train_xl = train_xl.drop(
[
"price_left",
"price_right",
"specTableContent_left",
"specTableContent_right",
"keyValuePairs_left",
"keyValuePairs_right",
"id_left",
"id_right",
"pair_id",
],
axis=1,
)
train_xl = train_xl.dropna()
train_xl.head(2)
train_data = train_xl.drop(["label"], axis=1)
label = train_xl[["label"]]
import re
# Remove punctuation and apply case folding
def preprocessor(text):
text = re.sub("<[^>]*>", "", text)
emoticons = re.findall("(?::|;|=)(?:-_)?(?:\)|\(|D|P)", text)
text = re.sub("[\W]+", " ", text.lower()) + " ".join(emoticons).replace("-", "")
return text
# apply the preprocessor to all description_left
train_data["category_left"] = train_data["category_left"].apply(preprocessor)
train_data["category_right"] = train_data["category_right"].apply(preprocessor)
train_data["brand_left"] = train_data["brand_left"].apply(preprocessor)
train_data["brand_right"] = train_data["brand_right"].apply(preprocessor)
train_data["description_left"] = train_data["description_left"].apply(preprocessor)
train_data["description_right"] = train_data["description_right"].apply(preprocessor)
train_data["title_left"] = train_data["title_left"].apply(preprocessor)
train_data["title_right"] = train_data["title_right"].apply(preprocessor)
train_data.head()
# Convert each text column to a NumPy array and store it back in the DataFrame
train_data["category_left"] = train_data["category_left"].to_numpy()
train_data["category_right"] = train_data["category_right"].to_numpy()
train_data["brand_left"] = train_data["brand_left"].to_numpy()
train_data["brand_right"] = train_data["brand_right"].to_numpy()
train_data["description_left"] = train_data["description_left"].to_numpy()
train_data["description_right"] = train_data["description_right"].to_numpy()
train_data["title_left"] = train_data["title_left"].to_numpy()
train_data["title_right"] = train_data["title_right"].to_numpy()
# view the data
train_data.head()
train_data["title_right"]
| false | 0 | 1,063 | 0 | 1,063 | 1,063 |
||
129897023
|
import pandas as pd
df = pd.read_csv("/kaggle/input/body-sensors/left_arm_raw.csv")
df
greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100
between_30_60 = (
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60)
).mean() * 100
less_than_30 = (df["Elevation_angle"] < 30).mean() * 100
average_angle = df["Elevation_angle"].mean()
print(f"Percentage of angles > 60: {greater_than_60:.2f}%")
print(f"Percentage of angles between 30 and 60: {between_30_60:.2f}%")
print(f"Percentage of angles < 30: {less_than_30:.2f}%")
print(f"Average angle: {average_angle:.2f}")
df["Duration"] = df["Timestamp_Android"].diff()
# Calculate the duration of angles greater than 60
greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum()
# Calculate the duration of angles between 30 and 60
between_30_60_duration = df.loc[
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration"
].sum()
# Calculate the duration of angles less than 30
less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum()
# Calculate the total duration
total_duration = df["Duration"].sum()
print(f"Duration of angles > 60: {greater_than_60_duration:.2f} seconds")
print(f"Duration of angles between 30 and 60: {between_30_60_duration:.2f} seconds")
print(f"Duration of angles < 30: {less_than_30_duration:.2f} seconds")
print(f"Total duration: {total_duration:.2f} seconds")
def calculate_stats(file):
df = pd.read_csv(file)
greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100
between_30_60 = (
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60)
).mean() * 100
less_than_30 = (df["Elevation_angle"] < 30).mean() * 100
average_angle = df["Elevation_angle"].mean()
df["Duration"] = df["Timestamp_Android"].diff()
greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum()
between_30_60_duration = df.loc[
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration"
].sum()
less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum()
total_duration = df["Duration"].sum()
response = {
"angles": {
"greater_than_60": greater_than_60,
"between_30_60": between_30_60,
"less_than_30": less_than_30,
"average": average_angle,
},
"duration": {
"greater_than_60_duration": greater_than_60_duration,
"between_30_60_duration": between_30_60_duration,
"less_than_30_duration": less_than_30_duration,
"duration": total_duration,
},
}
return response
test = calculate_stats("/kaggle/input/body-sensors/left_arm_raw.csv")
print(test)
test2 = calculate_stats("/kaggle/input/body-sensors/right_arm_raw.csv")
print(test2)
test3 = calculate_stats("/kaggle/input/body-sensors/trunk_raw.csv")
print(test3)
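# An optional follow-up (an addition, not in the original): put the three sensors'
# angle statistics side by side for easier comparison, reusing the dictionaries above.
summary = pd.DataFrame(
    {"left_arm": test["angles"], "right_arm": test2["angles"], "trunk": test3["angles"]}
)
print(summary.round(2))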
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897023.ipynb
| null | null |
[{"Id": 129897023, "ScriptId": 38638072, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7117453, "CreationDate": "05/17/2023 09:24:43", "VersionNumber": 1.0, "Title": "notebook52c71eb284", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 74.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
df = pd.read_csv("/kaggle/input/body-sensors/left_arm_raw.csv")
df
greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100
between_30_60 = (
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60)
).mean() * 100
less_than_30 = (df["Elevation_angle"] < 30).mean() * 100
average_angle = df["Elevation_angle"].mean()
print(f"Percentage of angles > 60: {greater_than_60:.2f}%")
print(f"Percentage of angles between 30 and 60: {between_30_60:.2f}%")
print(f"Percentage of angles < 30: {less_than_30:.2f}%")
print(f"Average angle: {average_angle:.2f}")
df["Duration"] = df["Timestamp_Android"].diff()
# Calculate the duration of angles greater than 60
greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum()
# Calculate the duration of angles between 30 and 60
between_30_60_duration = df.loc[
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration"
].sum()
# Calculate the duration of angles less than 30
less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum()
# Calculate the total duration
total_duration = df["Duration"].sum()
print(f"Duration of angles > 60: {greater_than_60_duration:.2f} seconds")
print(f"Duration of angles between 30 and 60: {between_30_60_duration:.2f} seconds")
print(f"Duration of angles < 30: {less_than_30_duration:.2f} seconds")
print(f"Total duration: {total_duration:.2f} seconds")
def calculate_stats(file):
df = pd.read_csv(file)
greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100
between_30_60 = (
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60)
).mean() * 100
less_than_30 = (df["Elevation_angle"] < 30).mean() * 100
average_angle = df["Elevation_angle"].mean()
df["Duration"] = df["Timestamp_Android"].diff()
greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum()
between_30_60_duration = df.loc[
(df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration"
].sum()
less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum()
total_duration = df["Duration"].sum()
response = {
"angles": {
"greater_than_60": greater_than_60,
"between_30_60": between_30_60,
"less_than_30": less_than_30,
"average": average_angle,
},
"duration": {
"greater_than_60_duration": greater_than_60_duration,
"between_30_60_duration": between_30_60_duration,
"less_than_30_duration": less_than_30_duration,
"duration": total_duration,
},
}
return response
test = calculate_stats("/kaggle/input/body-sensors/left_arm_raw.csv")
print(test)
test2 = calculate_stats("/kaggle/input/body-sensors/right_arm_raw.csv")
print(test2)
test3 = calculate_stats("/kaggle/input/body-sensors/trunk_raw.csv")
print(test3)
| false | 0 | 1,033 | 0 | 1,033 | 1,033 |
||
129897780
|
<jupyter_start><jupyter_text>Big Bang Theory All Seasons Dataset
### Context
Whoa! Being a huge fan of this series, finally I'm using my skills to analyse the data of related to the series. I am very excited to see how you guys will use this dataset.
### Content
The lives of four socially awkward friends, Leonard, Sheldon, Howard and Raj, take a wild turn when they meet the beautiful and free-spirited Penny.
This dataset content data of all the seasons, The columns are as follow:
* No.overall - Number of Episode Overall
* No. inseason - Number of Episode in the specific Season
* Title - Title of the Episode
* Directed by - Director of the Episode
* Written by - Writer of the Episode
* Original air date - Original Date when the Episode was Aired
* Prod.code - Alphanumeric production code of an episode
* U.S. viewers(millions) - Number of US Viewers in Millions
* Season - Number of Season
Kaggle dataset identifier: big-bang-theory-all-seasons-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
emp = [
("siva", 34, "banglour", 90),
("Ravi", 32, "chennai", 96),
("kavi", 12, "kolkata", 89),
("jadu", 13, "bombay", 99),
("mahatha", 45, "Delhi", 89),
]
df = pd.DataFrame(emp, columns=["name", "age", "city", "marks"])
df
df.dtypes
df["age"] = df["age"].astype("object")
df.dtypes
df1 = pd.read_csv("/kaggle/input/big-bang-theory-all-seasons-dataset/dataset.csv")
df1.info()
df1.columns
df1["Original air date"]
df1["Original air date"] = pd.to_datetime(df1["Original air date"])
df1.info()
df1["Season"] = df1["Season"].astype("object")
df1.info()
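# Aside (an optional illustration, not part of the lesson): for a repeated label such as
# Season, the "category" dtype is usually more memory-efficient than "object".
df1["Season"] = df1["Season"].astype("category")
print(df1["Season"].dtype)
print(df1.memory_usage(deep=True)["Season"])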
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897780.ipynb
|
big-bang-theory-all-seasons-dataset
|
shivavashishtha
|
[{"Id": 129897780, "ScriptId": 38635493, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11466021, "CreationDate": "05/17/2023 09:29:56", "VersionNumber": 1.0, "Title": "L9 changing the data types", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 48.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186309307, "KernelVersionId": 129897780, "SourceDatasetVersionId": 3261857}]
|
[{"Id": 3261857, "DatasetId": 1976176, "DatasourceVersionId": 3312258, "CreatorUserId": 4972751, "LicenseName": "CC0: Public Domain", "CreationDate": "03/06/2022 11:56:41", "VersionNumber": 1.0, "Title": "Big Bang Theory All Seasons Dataset", "Slug": "big-bang-theory-all-seasons-dataset", "Subtitle": "Complete Data of Big Bang Theory All Seasons scrapped from Wikipedia", "Description": "### Context\n\nWhoa! Being a huge fan of this series, finally I'm using my skills to analyse the data of related to the series. I am very excited to see how you guys will use this dataset. \n\n### Content\nThe lives of four socially awkward friends, Leonard, Sheldon, Howard and Raj, take a wild turn when they meet the beautiful and free-spirited Penny.\nThis dataset content data of all the seasons, The columns are as follow:\n* No.overall - Number of Episode Overall\n* No. inseason - Number of Episode in the specific Season\n* Title - Title of the Episode\n* Directed by - Director of the Episode\n* Written by - Writer of the Episode\n* Original air date - Original Date when the Episode was Aired\n* Prod.code - Alphanumeric production code of an episode\n* U.S. viewers(millions) - Number of US Viewers in Millions\n* Season - Number of Season\n\n### Acknowledgements\n\nWikipedia - I have used Web Scraping to scrap the data from wikipedia.\n\n### Inspiration\nSharing my work with others and, watching others doing great this with it is always a great inspiration", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1976176, "CreatorUserId": 4972751, "OwnerUserId": 4972751.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3261857.0, "CurrentDatasourceVersionId": 3312258.0, "ForumId": 2000436, "Type": 2, "CreationDate": "03/06/2022 11:56:41", "LastActivityDate": "03/06/2022", "TotalViews": 4771, "TotalDownloads": 345, "TotalVotes": 16, "TotalKernels": 26}]
|
[{"Id": 4972751, "UserName": "shivavashishtha", "DisplayName": "Shiva Vashishtha", "RegisterDate": "04/27/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
emp = [
("siva", 34, "banglour", 90),
("Ravi", 32, "chennai", 96),
("kavi", 12, "kolkata", 89),
("jadu", 13, "bombay", 99),
("mahatha", 45, "Delhi", 89),
]
df = pd.DataFrame(emp, columns=["name", "age", "city", "marks"])
df
df.dtypes
df["age"] = df["age"].astype("object")
df.dtypes
df1 = pd.read_csv("/kaggle/input/big-bang-theory-all-seasons-dataset/dataset.csv")
df1.info()
df1.columns
df1["Original air date"]
df1["Original air date"] = pd.to_datetime(df1["Original air date"])
df1.info()
df1["Season"] = df1["Season"].astype("object")
df1.info()
| false | 1 | 423 | 0 | 689 | 423 |
||
129897908
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
url = "https://www.news18.com/cricketnext/ipl-auction-2022/mi-players-list-8/"
intial = pd.read_html(url)
intial[1]
gujarat_titans = intial[1]
gujarat_titans["Team"] = "Gujarat Titans"
gujarat_titans.rename(columns={"2022 Squad GT": "player"}, inplace=True)
gujarat_titans.head(3)
csk = intial[2]
csk["Team"] = "Chennai Super Kings"
csk.rename(columns={"2022 Squad CSK": "player"}, inplace=True)
final = gujarat_titans.append(csk, ignore_index=True)
final
dc = intial[3]
dc["Team"] = "Delhi Capitals"
dc.rename(columns={"2022 Squad DC": "player"}, inplace=True)
final = final.append(dc, ignore_index=True)
final
kkr = intial[4]
kkr["Team"] = "Kolkata Knight Riders"
kkr.rename(columns={"2022 Squad KKR": "player"}, inplace=True)
final = final.append(kkr, ignore_index=True)
final
pbks = intial[5]
pbks["Team"] = "Punjab Kings"
pbks.rename(columns={"2022 Squad PBKS": "player"}, inplace=True)
final = final.append(pbks, ignore_index=True)
final
lsg = intial[6]
lsg["Team"] = "Lucknow Super Giants"
lsg.rename(columns={"2022 Squad LSG": "player"}, inplace=True)
final = final.append(lsg, ignore_index=True)
final
mi = intial[7]
mi["Team"] = "Mumbai Indians"
mi.rename(columns={"2022 Squad MI": "player"}, inplace=True)
final = final.append(mi, ignore_index=True)
final
rcb = intial[8]
rcb["Team"] = "Royal Challengers Bangalore"
rcb.rename(columns={"2022 Squad RCB": "player"}, inplace=True)
final = final.append(rcb, ignore_index=True)
final
rr = intial[9]
rr["Team"] = "Rajasthan Royals"
rr.rename(columns={"2022 Squad RR": "player"}, inplace=True)
final = final.append(rr, ignore_index=True)
final
srh = intial[10]
srh["Team"] = "Sunrisers Hyderabad"
srh.rename(columns={"2022 Squad SRH": "player"}, inplace=True)
final = final.append(srh, ignore_index=True)
final.head(2)
final.columns
intial[11]
text = intial[11]
text.rename(columns={"Base Price IN ₹ (CR.)": "Base Price"}, inplace=True)
text.rename(columns={"Players": "player"}, inplace=True)
text["COST IN ₹ (CR.)"] = np.nan
text["Cost IN $ (000)"] = np.nan
text.drop("Base Price IN $ (000)", axis=1, inplace=True)
text["Team"] = "Unsold"
text = text[
[
"player",
"Base Price",
"TYPE",
"COST IN ₹ (CR.)",
"Cost IN $ (000)",
"2021 Squad",
"Team",
]
]
final = final.append(text, ignore_index=True)
final
final.to_csv("ipl-2022-data.csv")
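# A quick optional sanity check (not part of the original notebook): squad sizes per team,
# including the "Unsold" bucket, and the total number of rows written to the CSV.
print(final["Team"].value_counts())
print("total players:", len(final))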
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897908.ipynb
| null | null |
[{"Id": 129897908, "ScriptId": 37153584, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11466021, "CreationDate": "05/17/2023 09:30:57", "VersionNumber": 1.0, "Title": "L6 CREATING A IPL DATASET", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 110.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
url = "https://www.news18.com/cricketnext/ipl-auction-2022/mi-players-list-8/"
intial = pd.read_html(url)
intial[1]
gujarat_titans = intial[1]
gujarat_titans["Team"] = "Gujarat Titans"
gujarat_titans.rename(columns={"2022 Squad GT": "player"}, inplace=True)
gujarat_titans.head(3)
csk = intial[2]
csk["Team"] = "Chennai Super Kings"
csk.rename(columns={"2022 Squad CSK": "player"}, inplace=True)
final = gujarat_titans.append(csk, ignore_index=True)
final
dc = intial[3]
dc["Team"] = "Delhi Capitals"
dc.rename(columns={"2022 Squad DC": "player"}, inplace=True)
final = final.append(dc, ignore_index=True)
final
kkr = intial[4]
kkr["Team"] = "Kolkata Knight Riders"
kkr.rename(columns={"2022 Squad KKR": "player"}, inplace=True)
final = final.append(kkr, ignore_index=True)
final
pbks = intial[5]
pbks["Team"] = "Punjab Kings"
pbks.rename(columns={"2022 Squad PBKS": "player"}, inplace=True)
final = final.append(pbks, ignore_index=True)
final
lsg = intial[6]
lsg["Team"] = "Lucknow Super Giants"
lsg.rename(columns={"2022 Squad LSG": "player"}, inplace=True)
final = final.append(lsg, ignore_index=True)
final
mi = intial[7]
mi["Team"] = "Mumbai Indians"
mi.rename(columns={"2022 Squad MI": "player"}, inplace=True)
final = final.append(mi, ignore_index=True)
final
rcb = intial[8]
rcb["Team"] = "Royal Challengers Bangalore"
rcb.rename(columns={"2022 Squad RCB": "player"}, inplace=True)
final = final.append(rcb, ignore_index=True)
final
rr = intial[9]
rr["Team"] = "Rajasthan Royals"
rr.rename(columns={"2022 Squad RR": "player"}, inplace=True)
final = final.append(rr, ignore_index=True)
final
srh = intial[10]
srh["Team"] = "Sunrisers Hyderabad"
srh.rename(columns={"2022 Squad SRH": "player"}, inplace=True)
final = final.append(srh, ignore_index=True)
final.head(2)
final.columns
intial[11]
text = intial[11]
text.rename(columns={"Base Price IN ₹ (CR.)": "Base Price"}, inplace=True)
text.rename(columns={"Players": "player"}, inplace=True)
text["COST IN ₹ (CR.)"] = np.nan
text["Cost IN $ (000)"] = np.nan
text.drop("Base Price IN $ (000)", axis=1, inplace=True)
text["Team"] = "Unsold"
text = text[
[
"player",
"Base Price",
"TYPE",
"COST IN ₹ (CR.)",
"Cost IN $ (000)",
"2021 Squad",
"Team",
]
]
final = final.append(text, ignore_index=True)
final
final.to_csv("ipl-2022-data.csv")
| false | 0 | 1,118 | 0 | 1,118 | 1,118 |
||
129559629
|
<jupyter_start><jupyter_text>CAPTCHA Images
### Context
This dataset contains CAPTCHA (Completely Automated Public Turing test to tell Computers and Humans Apart) images. Built in 1997 as way for users to identify and block bots (in order to prevent spam, DDOS etc.). They have since then been replace by reCAPTCHA because they are breakable using Artificial Intelligence (as I encourage you to do).
### Content
The images are 5 letter words that can contain numbers. The images have had noise applied to them (blur and a line). They are 200 x 50 PNGs.
Kaggle dataset identifier: captcha-version-2-images
<jupyter_script># # Travail IA - Machine Learning
# ## LAREU MATHIEU - LA2 IG2I - 2023
# L'objectif de ce travail est de réussir à décrypter un captcha en noir et blanc composé de 5 caractères.
# On compte un total de 19 caractères différents (2,3,4,5,6,7,8,b,c,d,e,f,g,m,n,p,w,x,y) et l'objectif pour chaque captcha est de réussir à découper chaque caractère puis d'utiliser un FC Classifier pour analyser et prédire les 5 caractères du captcha.
# ## Importation des librairies
import os
import cv2 as cv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
kernel = np.ones((5, 2), np.uint8)
kernel2 = np.ones((2, 2), np.uint8)
img_folder = "/kaggle/input/captcha-version-2-images/samples/samples/"
# ## Displaying an image
def plot_(img1):
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.imshow(img1, "gray")
plt.axis("off")
image_test = cv.imread(img_folder + "2en7g.png", cv.IMREAD_GRAYSCALE)
plot_(image_test)
# ## Improving image quality to make recognition easier
# ### Removing the background
# We use the ***adaptiveThreshold*** function from the OpenCV library in Python to produce a binarised image (black and white only).
# The parameters are:
# * image_test: our image
# * 255: the maximum value given to white pixels in the binarised image
# * cv2.ADAPTIVE_THRESH_GAUSSIAN_C = the method used to compute the pixel threshold, here a Gaussian-weighted mean
# * cv2.THRESH_BINARY = the threshold type to apply; pixels become white when their value is above the threshold, black otherwise
# * 145 = the neighbourhood size used to compute each pixel's threshold
# * 0 = a constant used to fine-tune the threshold to the characteristics of the image
thresh = cv.adaptiveThreshold(
image_test, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 145, 0
)
plot_(thresh)
# ### Further cleaning of the image
# We now try to hide the lines drawn over the code so that the prediction step is easier later on. To do this we use the ***morphologyEx*** function from the OpenCV library in Python
closing = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
plot_(closing)
dilation = cv.dilate(closing, kernel2, iterations=1)
plot_(dilation)
blurring = cv.GaussianBlur(dilation, (1, 1), 0)  # cv is the OpenCV alias; note a (1, 1) kernel leaves the image essentially unchanged
plot_(blurring)
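# A hedged sketch of the segmentation step described in the introduction: split the cleaned
# image into individual character crops with contour detection. This is one possible approach,
# not the author's implementation; the area threshold is a guess for 200x50 captchas.
contours, _ = cv.findContours(255 - blurring, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
boxes = sorted(
    (cv.boundingRect(c) for c in contours if cv.contourArea(c) > 50),
    key=lambda b: b[0],  # left-to-right order
)
chars = [blurring[y : y + h, x : x + w] for (x, y, w, h) in boxes]
print("character candidates found:", len(chars))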
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559629.ipynb
|
captcha-version-2-images
|
fournierp
|
[{"Id": 129559629, "ScriptId": 38401206, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297646, "CreationDate": "05/14/2023 20:44:11", "VersionNumber": 1.0, "Title": "CAPTCHA-LM", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 83.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185742483, "KernelVersionId": 129559629, "SourceDatasetVersionId": 306654}]
|
[{"Id": 306654, "DatasetId": 38019, "DatasourceVersionId": 319594, "CreatorUserId": 1912216, "LicenseName": "Other (specified in description)", "CreationDate": "02/27/2019 03:04:17", "VersionNumber": 2.0, "Title": "CAPTCHA Images", "Slug": "captcha-version-2-images", "Subtitle": "Version 2 CAPTCHA Images", "Description": "### Context\n\nThis dataset contains CAPTCHA (Completely Automated Public Turing test to tell Computers and Humans Apart) images. Built in 1997 as way for users to identify and block bots (in order to prevent spam, DDOS etc.). They have since then been replace by reCAPTCHA because they are breakable using Artificial Intelligence (as I encourage you to do).\n\n### Content\n\nThe images are 5 letter words that can contain numbers. The images have had noise applied to them (blur and a line). They are 200 x 50 PNGs.\n\n### Acknowledgements\n\nThe dataset comes from [Wilhelmy, Rodrigo & Rosas, Horacio. (2013). captcha dataset.][1] \n[1]: https://www.researchgate.net/publication/248380891_captcha_dataset\n\nThumbnail image from [Accessibility of CAPTCHAs]\n[2]: http://www.bespecular.com/blog/accessibility-of-captchas/\n### Inspiration\n\nThis dataset is a perfect opportunity to attempt to make Optical Character Recognition algorithms.", "VersionNotes": "Updated (correct mislabled image)", "TotalCompressedBytes": 9084225.0, "TotalUncompressedBytes": 9084225.0}]
|
[{"Id": 38019, "CreatorUserId": 1912216, "OwnerUserId": 1912216.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 306654.0, "CurrentDatasourceVersionId": 319594.0, "ForumId": 46467, "Type": 2, "CreationDate": "07/20/2018 15:28:48", "LastActivityDate": "07/20/2018", "TotalViews": 143419, "TotalDownloads": 13999, "TotalVotes": 304, "TotalKernels": 92}]
|
[{"Id": 1912216, "UserName": "fournierp", "DisplayName": "Fournierp", "RegisterDate": "05/14/2018", "PerformanceTier": 1}]
|
# # AI Project - Machine Learning
# ## LAREU MATHIEU - LA2 IG2I - 2023
# The goal of this project is to decode a black-and-white captcha made up of 5 characters.
# There are 19 distinct characters in total (2,3,4,5,6,7,8,b,c,d,e,f,g,m,n,p,w,x,y); for each captcha the objective is to segment every character and then use an FC classifier to analyse and predict the captcha's 5 characters.
# ## Importing the libraries
import os
import cv2 as cv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
kernel = np.ones((5, 2), np.uint8)
kernel2 = np.ones((2, 2), np.uint8)
img_folder = "/kaggle/input/captcha-version-2-images/samples/samples/"
# ## Displaying an image
def plot_(img1):
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.imshow(img1, "gray")
plt.axis("off")
image_test = cv.imread(img_folder + "2en7g.png", cv.IMREAD_GRAYSCALE)
plot_(image_test)
# ## Improving image quality to make recognition easier
# ### Removing the background
# We use the ***adaptiveThreshold*** function from the OpenCV library in Python to produce a binarised image (black and white only).
# The parameters are:
# * image_test: our image
# * 255: the maximum value given to white pixels in the binarised image
# * cv2.ADAPTIVE_THRESH_GAUSSIAN_C = the method used to compute the pixel threshold, here a Gaussian-weighted mean
# * cv2.THRESH_BINARY = the threshold type to apply; pixels become white when their value is above the threshold, black otherwise
# * 145 = the neighbourhood size used to compute each pixel's threshold
# * 0 = a constant used to fine-tune the threshold to the characteristics of the image
thresh = cv.adaptiveThreshold(
image_test, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 145, 0
)
plot_(thresh)
# ### Further cleaning of the image
# We now try to hide the lines drawn over the code so that the prediction step is easier later on. To do this we use the ***morphologyEx*** function from the OpenCV library in Python
closing = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
plot_(closing)
dilation = cv.dilate(closing, kernel2, iterations=1)
plot_(dilation)
blurring = cv.GaussianBlur(dilation, (1, 1), 0)  # cv is the OpenCV alias; note a (1, 1) kernel leaves the image essentially unchanged
plot_(blurring)
| false | 0 | 893 | 0 | 1,051 | 893 |
||
129559372
|
# Published on May 14, 2023 by Marília Prata, mpwolke
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import json
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Brain Station 23
df = pd.read_csv(
"/kaggle/input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv",
delimiter=",",
encoding="UTF-8",
)
pd.set_option("display.max_columns", None)
df.head()
# #Missing Values
df.isnull().sum()
# By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution
# Define a function to plot a bar plot easily
def bar_plot(df, x, x_title, y, title, colors=None, text=None):
fig = px.bar(
x=x,
y=y,
text=text,
labels={x: x_title.title()}, # replaces default labels by column name
data_frame=df,
color=colors,
barmode="group",
template="simple_white",
color_discrete_sequence=px.colors.qualitative.Prism,
)
texts = [df[col].values for col in y]
for i, t in enumerate(texts):
fig.data[i].text = t
fig.data[i].textposition = "inside"
fig["layout"].title = title
for trace in fig.data:
trace.name = trace.name.replace("_", " ").title()
fig.update_yaxes(tickprefix="", showgrid=True)
fig.show()
# By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution
# let's define a function to plot a histogram easily
def hist_plot(df, x, title):
fig = px.histogram(x=df[x], color_discrete_sequence=colors, opacity=0.8)
fig["layout"].title = title
fig.update_yaxes(tickprefix="", showgrid=True)
fig.show()
# #Word Frequency on Writeup Titles
# By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution
import plotly.express as px
import plotly.graph_objs as go
import itertools
# Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516
# Find words spreading (each word frequency)
freq_d = pd.Series(" ".join(df["Title of Writeup"]).split()).value_counts()
# Plot the words distribution
fig = px.line(freq_d, title="Word frequency on Writeups Titles")
fig.update_layout(showlegend=False)
# #Replace the Nans, otherwise you'll get that error below (Float found=> Nan)
# TypeError: sequence item 105: expected str instance, float found
# categorical features with missing values
categorical_nan = [
feature
for feature in df.columns
if df[feature].isna().sum() > 0 and df[feature].dtypes == "O"
]
print(categorical_nan)
# replacing missing values in categorical features
for feature in categorical_nan:
df[feature] = df[feature].fillna("None")
df[categorical_nan].isna().sum()
# #Word frequency on Writeups
# By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution
import plotly.express as px
import plotly.graph_objs as go
import itertools
# Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516
# Find words spreading (each word frequency)
freq_d = pd.Series(" ".join(df["Writeup"]).split()).value_counts()
# Plot the words distribution
fig = px.line(freq_d, title="Word frequency on Writeups")
fig.update_layout(showlegend=False)
prepared_as_text = [line for line in df["Title of Writeup"]]
text_prepared_results = "\n".join(prepared_as_text)
text = " ".join(t for t in df["Title of Writeup"])
words_list = text.split()
word_freq = {}
for word in set(words_list):
word_freq[word] = words_list.count(word)
# sorting the dictionary
word_freq = dict(sorted(word_freq.items(), reverse=True, key=lambda item: item[1]))
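# Note: the loop above calls list.count once per unique word, which is quadratic in the
# number of tokens; collections.Counter produces the same sorted frequency dict in one pass.
from collections import Counter
word_freq = dict(Counter(words_list).most_common())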
import itertools
# sort the data and put it in a data frame for the visualization
word_freq_temp = dict(itertools.islice(word_freq.items(), 25))
word_freq_df = pd.DataFrame(
word_freq_temp.items(), columns=["word", "count"]
).sort_values("count", ascending=False)
# #Frequent words on Writeups Titles
import plotly.express as px
# Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516
bar_plot(
word_freq_df.reset_index(),
"word",
"Words",
["count"],
title="20 Frequent words on Writeups Titles",
)
import nltk
import string
from wordcloud import WordCloud
nltk.download("stopwords")
from nltk.corpus import stopwords
stop = stopwords.words("english")
from nltk.stem import WordNetLemmatizer
from textblob import TextBlob, Word
from collections import Counter
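# WordCloud is imported above but not used in the cells that follow; a short optional
# sketch of how it could be applied to the writeup titles (settings here are arbitrary).
wc = WordCloud(width=800, height=300, background_color="white", stopwords=set(stop))
plt.figure(figsize=(10, 4))
plt.imshow(wc.generate(text), interpolation="bilinear")
plt.axis("off")
plt.show()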
# Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516
# Bigrams
from nltk.util import ngrams
def get_n_grams_count(text, n_grams, min_freq):
output = {}
tokens = nltk.word_tokenize(text)
# Create the n_gram
if n_grams == 2:
gs = nltk.bigrams(tokens)
elif n_grams == 3:
gs = nltk.trigrams(tokens)
    else:
        raise ValueError("Only 2-grams (bigrams) and 3-grams (trigrams) are supported")
# compute frequency distribution for all the bigrams in the text
fdist = nltk.FreqDist(gs)
for k, v in fdist.items():
if v > min_freq:
index = " ".join(k)
output[index] = v
return output
# #Bigrams
two_grams = get_n_grams_count(text, n_grams=2, min_freq=10)
two_grams_df = pd.DataFrame(data=two_grams.items())
two_grams_df = two_grams_df.sort_values(by=1, ascending=False).rename(
columns={0: "Two grams", 1: "Count"}
)
two_grams_df
# #Frequent Bigrams on Writeup Titles
bar_plot(
two_grams_df.iloc[:20],
"Two grams",
"Two grams",
["Count"],
title="Frequent bigram on Writeup Titles",
)
# #Trigrams
three_grams = get_n_grams_count(text, n_grams=3, min_freq=0)
three_grams_df = pd.DataFrame(data=three_grams.items())
three_grams_df = three_grams_df.sort_values(by=1, ascending=False).rename(
columns={0: "Three grams", 1: "Count"}
)
three_grams_df
bar_plot(
three_grams_df.iloc[:20],
"Three grams",
"Three grams",
["Count"],
title="Frequent trigrams on Writeups Titles",
)
# #Words length
words_length = {}
for word in set(words_list):
words_length[word] = len(word)
words_length = dict(
sorted(words_length.items(), reverse=True, key=lambda item: item[1])
)
# keep the 25 longest words and put them in a data frame for the visualization
word_length_temp = dict(itertools.islice(words_length.items(), 25))
words_length_df = pd.DataFrame(
    word_length_temp.items(), columns=["word", "length"]
).sort_values("length", ascending=False)
# #Sentence level analysis
# Text statistics at the sentence level include the length distribution and its minimum, maximum, and average. The code below computes the character length of each writeup title and prints these statistics:
df["sentence_len"] = df["Title of Writeup"].str.len()
print(
"Max length : {} \nMin length : {} \nAverage Length : {}".format(
max(df["sentence_len"]), min(df["sentence_len"]), df["sentence_len"].mean()
)
)
# #The Longest Sentence on Writeup Titles
# the longest sentence we have
df[df["sentence_len"] == max(df["sentence_len"])]["Title of Writeup"].values[0]
# #The Shortest sentence: 'models'
# the shortest sentence we have
df[df["sentence_len"] == min(df["sentence_len"])]["Title of Writeup"].values[0]
# #Sentences Length Distribution
colors = px.colors.qualitative.Prism
hist_plot(df, "sentence_len", title="Sentence length distribution (including spaces)")
# #Competition's Titles
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
fig, ax = plt.subplots(figsize=(4, 4))
df["Title of Competition"].value_counts().head(10).sort_values(ascending=True).plot(
kind="barh", color="g", ax=ax, title="Kaggle Title of Competitions"
)
ax.set_xlabel("Number of Training Examples")
plt.show()
df["Title of Writeup"].value_counts()
# #Titles of Writeups: 1st place Solution
# Note that "1st place" (lower case, 44 times) and "1st Place" (upper case) are counted separately; a small case-normalization sketch follows the plot below.
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
fig, ax = plt.subplots(figsize=(4, 4))
df["Title of Writeup"].value_counts().head(10).sort_values(ascending=True).plot(
kind="barh", color="r", ax=ax, title="Kaggle Competitions Writeups Rock!"
)
ax.set_xlabel("Number of Training Examples")
plt.show()
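# Added sketch (not in the original notebook): lower-casing the titles before
# counting merges case variants such as "1st place Solution" and "1st Place Solution".
normalized_title_counts = df["Title of Writeup"].str.lower().value_counts()
print(normalized_title_counts.head(10))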
# #Checking Writeup Titles
# We see Solution, Place, Medal, Silver and even Bronze. Where is the Gold? Not many, just a few; a quick word count follows the word cloud below.
##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook
from wordcloud import WordCloud
from wordcloud import STOPWORDS
stopwords = set(STOPWORDS)
wordcloud = WordCloud(
background_color="black", colormap="Set2", height=2000, width=2000
).generate(str(df["Title of Writeup"]))
plt.rcParams["figure.figsize"] = (12, 12)
plt.axis("off")
plt.imshow(wordcloud)
plt.title("Title of Kaggle Competitions Writeups")
plt.show()
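# Added quick check (not in the original notebook) for the observation above:
# how often each medal-related word appears in the writeup titles.
for medal_word in ["Gold", "Silver", "Bronze", "Medal", "Solution"]:
    medal_hits = df["Title of Writeup"].str.contains(medal_word, case=False).sum()
    print(f"{medal_word}: {medal_hits} titles")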
# #Thank, Organisers, Hosting, Surprised, Congratulation, Fun and Kaggle: Writeups
##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook
from wordcloud import WordCloud
from wordcloud import STOPWORDS
stopwords = set(STOPWORDS)
wordcloud = WordCloud(
background_color="lightblue", colormap="Set3", height=2000, width=2000
).generate(str(df["Writeup"]))
plt.rcParams["figure.figsize"] = (12, 12)
plt.axis("off")
plt.imshow(wordcloud)
plt.title("Kaggle Competitions Writeups")
plt.show()
import glob
import spacy
from spacy import displacy
from pathlib import Path
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# Code by Abu Bakar https://www.kaggle.com/bakar31/writing-evaluation-noob-eda/notebook
def get_top_n_words(corpus, n=None, remove_stop_words=False, n_words=1):
if remove_stop_words:
vec = CountVectorizer(stop_words="english", ngram_range=(n_words, n_words)).fit(
corpus
)
else:
vec = CountVectorizer(ngram_range=(n_words, n_words)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
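# Illustrative call (added): get_top_n_words is defined above but not exercised
# later in this notebook; applied to the writeup titles it would look like this.
top_title_unigrams = get_top_n_words(
    df["Title of Writeup"], n=10, remove_stop_words=True, n_words=1
)
print(top_title_unigrams)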
# #Working on: Solution sharing by Jiwei Liu 1st Place - 9 years ago
# Competition: Tradeshift Text Classification
# https://www.kaggle.com/c/3984/discussion/10901
# https://www.kaggle.com/competitions/tradeshift-text-classification/overview
# Only 3 Kaggle Notebooks!
import spacy
from spacy.lang.ru.examples import sentences
nlp = spacy.load("en_core_web_sm")
doc = nlp(
"""Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost
Many many thanks! You are the true heroes!
Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows:
1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!)
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features.
3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta.
The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P
We will make a formal description and code release after some cleaning up. Cheers!"""
)
entities = [(i, i.label_, i.label) for i in doc.ents]
entities
# print(doc.text)
# for token in doc:
# print(token.text, token.pos_, token.dep_)
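# Added follow-up: pair each detected entity with a human-readable
# explanation of its label via spacy.explain.
for ent in doc.ents:
    print(ent.text, ent.label_, spacy.explain(ent.label_))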
# #We will make a formal description and code release after some cleaning up. Cheers!
# That part of the Solution I understood.
displacy.render(doc, style="ent", jupyter=True)
# #Tokenization
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(
"""Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost
Many many thanks! You are the true heroes!
Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows:
1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!)
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features.
3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta.
The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P
We will make a formal description and code release after some cleaning up. Cheers!"""
)
for token in doc:
print(token.text)
# #Part-Of-Speech (POS) Tagging
nlp = spacy.load("en_core_web_sm")
# Create an nlp object
doc = nlp(
"""Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost
Many many thanks! You are the true heroes!
Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows:
1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!)
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features.
3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta.
The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P
We will make a formal description and code release after some cleaning up. Cheers!"""
)
# Iterate over the tokens
for token in doc:
# Print the token and its part-of-speech tag
print(token, token.tag_, token.pos_, spacy.explain(token.tag_))
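# Added aggregation: count how often each coarse part-of-speech tag occurs,
# using the Counter imported earlier in this notebook.
pos_counts = Counter(token.pos_ for token in doc)
print(pos_counts.most_common(10))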
# #WordCloud - Spacy
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
nlp = spacy.load("en_core_web_sm")  # using the small English model here
tokens = nlp(
"""Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost
Many many thanks! You are the true heroes!
Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows:
1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!)
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features.
3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta.
The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P
We will make a formal description and code release after some cleaning up. Cheers!"""
)
newText = ""
for word in tokens:
if word.pos_ in ["ADJ", "NOUN"]:
newText = " ".join((newText, word.text.lower()))
wordcloud = WordCloud(
stopwords=STOPWORDS, colormap="Reds", background_color="blue"
).generate(newText)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# #Post-Mortem. Posted Eleven years ago - Most Creative Title among Many, many Solutions
# Post-Mortem
# By BarrenWuffet (9th Place on that "Raising Money to Fund an Organizational Mission" competition)
# Unfortunately, BarrenWuffet was last seen on Kaggle 6 months ago. I hope to see you back soon.
# "Anybody do or find anything interesting in this dataset? Or find any good tools for working with large data sets?"
# "I found it barely manageable given the size. It also took me awhile to wrap by head around what was in each of the files. Also the database restrictions (1 and 2 but not 3, 1 and 2 and 3, 2 and 3 but not 1, etc) made a really difficult task much harder and considerably less enjoyable. I ended up having to write 12 large files (and accompanying SQL code) for each model to compensate for this."
# "I started out with a ~2% sample of the training data in R but even this was rough. I tried using the ff package without much luck. I ended up doing most of the data manipulation using SQL Server 2008 R2 Express and the SSMS which I found to be a bright spot in the whole process as it performed really well given the data size. I especially appreciated the data import manager which helps with wide data sets."
# "My best model ended up just being prior averages for prospectid, zip5, and packageid with linear regression. I predicted donation amount (not amount2) and response rate using separate models. I'd then do predictedGift^1.15 * predictedResponseRate for a final prediction."
# "I tried to use some of the demographic data but had a hard time as I was using zip5 as the key to get state abbreviations as a factor, but some of the zip codes cross state lines which leads to duplicates and zip9, even when indexed, just took too long."
# "I think the contest was a cool idea but would have been much better on just one of the 3 databases. Without that restriction I would have had more time to explore the demographic and historical data. The one thing I would have really liked to explore is people's giving before the training period especially as much of the mailings seemed to be political and the test data is sitting within 12 months of a presidential election."
# https://www.kaggle.com/c/2863/discussion/2712
mor = df[(df["Title of Writeup"] == "Post-Mortem")].reset_index(drop=True)
mor.head()
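# Added sketch with made-up numbers, only to illustrate the combination rule
# quoted above (predictedGift^1.15 * predictedResponseRate); the values below
# are hypothetical, not taken from the competition data.
predicted_gift = np.array([25.0, 40.0, 10.0])
predicted_response_rate = np.array([0.02, 0.01, 0.05])
final_prediction = predicted_gift**1.15 * predicted_response_rate
print(final_prediction)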
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559372.ipynb
| null | null |
[{"Id": 129559372, "ScriptId": 38491070, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3012786, "CreationDate": "05/14/2023 20:41:13", "VersionNumber": 2.0, "Title": "Kaggle Competitions still Rock Spacy!", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 537.0, "LinesInsertedFromPrevious": 232.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 305.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 5,811 | 0 | 5,811 | 5,811 |
||
129559161
|
<jupyter_start><jupyter_text>ImageNet 1000 (mini)
### Context
https://github.com/pytorch/examples/tree/master/imagenet
Kaggle dataset identifier: imagenetmini-1000
<jupyter_script>import os
del os.environ["PYTHONPATH"]
import sys
print("Python version")
print(sys.version)
print("Version info.")
print(sys.version_info)
# GPU
#!wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
#!mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
#!wget https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
#!dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
#!apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
#!echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list
#!echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
#!apt-get update
#!apt-get -y install cuda
#!wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
#!apt-get --assume-yes install ./nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
#!apt-get update
# GPU
# CPU
# %%capture
#
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetCommon-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetTorch-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl -f https://download.pytorch.org/whl/torch_stable.html
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/Aimet-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl
#!python -m site
#!ls ~/miniconda3/envs/py38/lib/python3.8/site-packages
#! python3 -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])'
#! python -m site
#! cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/reqs_deb_common.txt | xargs apt-get --assume-yes install
# GPU
# GPU
# If you installed the CUDA 11.x drivers
# ln -s /usr/local/cuda-11.0 /usr/local/cuda
# OR if you installed the CUDA 10.x drivers
# ln -s /usr/local/cuda-10.0 /usr/local/cuda
#! echo $PYTHONPATH
# pip freeze
#!cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/envsetup.sh
import os
os.environ[
"LD_LIBRARY_PATH"
] = "/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:/usr/local/cuda/targets/x86_64-linux/lib"
os.environ["CUDA_TOOLKIT_PATH"] = "/usr/local/cuda"
os.environ["CUDNN_INSTALL_PATH"] = "/usr/local/cuda"
os.environ["CUDA_HOME"] = "/usr/local/cuda"
os.environ["NVIDIA_DRIVER_CAPABILITIES"] = "compute,utility"
os.environ["NVIDIA_VISIBLE_DEVICES"] = "all"
os.environ[
"PYTHONPATH"
] = "/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages"
os.environ["LD_LIBRARY_PATH"] += ":/usr/lib/x86_64-linux-gnu/"
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
# os.environ['LD_LIBRARY_PATH']
import sys
sys.path.append("/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common")
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
#! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so
#! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
# # Python
# update image_net_dataloader
"""
Creates data-loader for Image-Net dataset
source: https://github.com/quic/aimet/blob/develop/Examples/torch/utils/image_net_data_loader.py
"""
import logging
import os
from torchvision import transforms
from torchvision.datasets.folder import default_loader, has_file_allowed_extension
from torch.utils.data import Dataset
import torch.utils.data as torch_data
from Examples.common import image_net_config
logger = logging.getLogger("Dataloader")
IMG_EXTENSIONS = ".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif"
def make_dataset(
directory: str, class_to_idx: dict, extensions: tuple, num_samples_per_class: int
) -> list:
"""
Creates a dataset of images with num_samples_per_class images in each class
:param directory: The string path to the data directory.
:param class_to_idx: A dictionary mapping the name of the class to the index (label)
:param extensions: list of valid extensions to load data
:param num_samples_per_class: Number of samples to use per class.
:return: list of images containing the entire dataset.
"""
images = []
num_classes = 0
directory = os.path.expanduser(directory)
for class_name in sorted(class_to_idx.keys()):
class_path = os.path.join(directory, class_name)
if os.path.isdir(class_path):
class_idx = class_to_idx[class_name]
class_images = add_images_for_class(
class_path, extensions, num_samples_per_class, class_idx
)
images.extend(class_images)
num_classes += 1
logger.info("Dataset consists of %d images in %d classes", len(images), num_classes)
return images
def add_images_for_class(
class_path: str, extensions: tuple, num_samples_per_class: int, class_idx: int
) -> list:
"""
For a given class, adds num_samples_per_class images to a list.
:param class_path: The string path to the class directory.
:param extensions: List of valid extensions to load data
:param num_samples_per_class: Number of samples to use per class.
:param class_idx: numerical index of class.
:return: list of images for given class.
"""
class_images = []
count = 0
for file_name in os.listdir(class_path):
if num_samples_per_class and count >= num_samples_per_class:
break
if has_file_allowed_extension(file_name, extensions):
image_path = os.path.join(class_path, file_name)
item = (image_path, class_idx)
class_images.append(item)
count += 1
return class_images
class ImageFolder(Dataset):
"""
Dataset class inspired by torchvision.datasets.folder.DatasetFolder for images organized as
individual files grouped by category.
"""
def __init__(
self,
root: str,
transform=None,
target_transform=None,
num_samples_per_class: int = None,
):
"""
:param root: The path to the data directory.
:param transform: The required processing to be applied on the sample.
:param target_transform: The required processing to be applied on the target.
:param num_samples_per_class: Number of samples to use per class.
"""
Dataset.__init__(self)
classes, class_to_idx = self._find_classes(root)
self.samples = make_dataset(
root, class_to_idx, IMG_EXTENSIONS, num_samples_per_class
)
if not self.samples:
raise (
RuntimeError(
"Found 0 files in sub folders of: {}\nSupported extensions are: {}".format(
root, ",".join(IMG_EXTENSIONS)
)
)
)
self.root = root
self.loader = default_loader
self.extensions = IMG_EXTENSIONS
self.classes = classes
self.class_to_idx = class_to_idx
self.targets = [s[1] for s in self.samples]
self.transform = transform
self.target_transform = target_transform
self.imgs = self.samples
@staticmethod
def _find_classes(directory: str):
classes = [
d
for d in os.listdir(directory)
if os.path.isdir(os.path.join(directory, d))
]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index: int):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class ImageNetDataLoader:
"""
For loading Validation data from the ImageNet dataset.
"""
def __init__(
self,
images_dir: str,
image_size: int,
batch_size: int = 128,
is_training: bool = False,
num_workers: int = 8,
num_samples_per_class: int = None,
):
"""
:param images_dir: The path to the data directory
:param image_size: The length of the image
:param batch_size: The batch size to use for training and validation
:param is_training: Indicates whether to load the training or validation data
        :param num_workers: Indicates to the data loader how many sub-processes to use for data loading.
:param num_samples_per_class: Number of samples to use per class.
"""
# For normalization, mean and std dev values are calculated per channel
# and can be found on the web.
normalize = transforms.Normalize(
mean=image_net_config.dataset["images_mean"],
std=image_net_config.dataset["images_std"],
)
self.train_transforms = transforms.Compose(
[
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
self.val_transforms = transforms.Compose(
[
transforms.Resize(image_size + 24),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
]
)
if is_training:
data_set = ImageFolder(
root=os.path.join(images_dir, "train"),
transform=self.train_transforms,
num_samples_per_class=num_samples_per_class,
)
else:
data_set = ImageFolder(
root=os.path.join(images_dir, "val"),
transform=self.val_transforms,
num_samples_per_class=num_samples_per_class,
)
self._data_loader = torch_data.DataLoader(
data_set,
batch_size=batch_size,
shuffle=is_training,
num_workers=num_workers,
)
@property
def data_loader(self) -> torch_data.DataLoader:
"""
Returns the data-loader
"""
return self._data_loader
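# Illustrative usage sketch (added): how the loader above could be exercised
# directly for a quick sanity check. The directory below is a placeholder;
# the actual path is configured later via DATASET_DIR and ImageNetDataPipeline.
# sanity_loader = ImageNetDataLoader(
#     images_dir="/path/to/imagenet-mini",
#     image_size=224,
#     batch_size=32,
#     is_training=False,
#     num_samples_per_class=4,
# ).data_loader
# images, labels = next(iter(sanity_loader))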
# adaround script
ROOT_DATA_AND_OUTPUTS = "./"
N = 16  # number of validation samples to use per class
BIWIDTH = 4
BIWIDTH_ACTIVATION = 8 # quantization on the input and output of the layer
DATASET_FOLDER_PATH = "../input/imagenetmini-1000/imagenet-mini/"
# The evaluation batch size is 32 and ~2000 calibration images are recommended,
# i.e. roughly 62 batches; the paper used 2048 images -> 64 batches.
ADAROUND_NUM_BATCHES = 64
ADAROUND_ITERATIONS = 20000
# ADAROUND_NUM_BATCHES = 16
# ADAROUND_ITERATIONS = 10000
# the paper reports an accuracy of ~68.6% for this setting
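# Added arithmetic check of the batch bookkeeping above: with an evaluation
# batch size of 32, the paper's 2048 calibration images correspond to 64 batches.
import math
assert math.ceil(2048 / 32) == ADAROUND_NUM_BATCHES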
import datetime
DATASET_DIR = f"{ROOT_DATA_AND_OUTPUTS}{DATASET_FOLDER_PATH}"
output_dir = (
f'{ROOT_DATA_AND_OUTPUTS}output_{datetime.datetime.now().strftime("%Y%m%d_%H%M")}/'
)
import os
os.makedirs(output_dir, exist_ok=True)
import os
import torch
from Examples.common import image_net_config
from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator
# from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader
# ImageNetDataLoader is already defined inline above (the updated data loader),
# so the separate module import is not required here:
# from imagenet_dataloader import ImageNetDataLoader
class ImageNetDataPipeline:
@staticmethod
def get_val_dataloader() -> torch.utils.data.DataLoader:
"""
Instantiates a validation dataloader for ImageNet dataset and returns it
"""
data_loader = ImageNetDataLoader(
DATASET_DIR,
image_size=image_net_config.dataset["image_size"],
batch_size=image_net_config.evaluation["batch_size"],
is_training=False,
num_workers=image_net_config.evaluation["num_workers"],
num_samples_per_class=N,
).data_loader
return data_loader
@staticmethod
def evaluate(model: torch.nn.Module, use_cuda: bool) -> float:
"""
Given a torch model, evaluates its Top-1 accuracy on the dataset
:param model: the model to evaluate
:param use_cuda: whether or not the GPU should be used.
"""
evaluator = ImageNetEvaluator(
DATASET_DIR,
image_size=image_net_config.dataset["image_size"],
batch_size=image_net_config.evaluation["batch_size"],
num_workers=image_net_config.evaluation["num_workers"],
num_val_samples_per_class=N,
)
return evaluator.evaluate(model, iterations=None, use_cuda=use_cuda)
from torchvision.models import resnet18
model = resnet18(pretrained=True)
# model preparation
from aimet_torch.model_preparer import prepare_model
model = prepare_model(model)
# move to device
use_cuda = False
if torch.cuda.is_available():
use_cuda = True
model.to(torch.device("cuda"))
print("Using cuda: {}".format(use_cuda))
# accuracy of the original model
accuracy = ImageNetDataPipeline.evaluate(model, use_cuda)
from termcolor import colored
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Original model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
from aimet_torch.batch_norm_fold import fold_all_batch_norms
_ = fold_all_batch_norms(model, input_shapes=(1, 3, 224, 224))
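# Note (added): folding the batch-norm parameters into the neighbouring
# convolution weights first means the quantization simulation below operates
# on the same effective weights that would be used at inference time.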
from aimet_common.defs import QuantScheme
from aimet_torch.quantsim import QuantizationSimModel
dummy_input = torch.rand(
1, 3, 224, 224
) # Shape for each ImageNet sample is (3 channels) x (224 height) x (224 width)
if use_cuda:
dummy_input = dummy_input.cuda()
# https://arxiv.org/pdf/2201.08442.pdf
# The paper uses min-max quantization, which corresponds to quant_scheme=QuantScheme.post_training_tf
sim = QuantizationSimModel(
model=model,
# quant_scheme=QuantScheme.post_training_tf_enhanced,
quant_scheme=QuantScheme.post_training_tf,
dummy_input=dummy_input,
default_output_bw=BIWIDTH_ACTIVATION,
default_param_bw=BIWIDTH,
)
def pass_calibration_data(sim_model, use_cuda):
data_loader = ImageNetDataPipeline.get_val_dataloader()
batch_size = data_loader.batch_size
if use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
sim_model.eval()
samples = 1000
batch_cntr = 0
with torch.no_grad():
for input_data, target_data in data_loader:
inputs_batch = input_data.to(device)
sim_model(inputs_batch)
batch_cntr += 1
if (batch_cntr * batch_size) > samples:
break
sim.compute_encodings(
forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda
)
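# Note (added): compute_encodings runs the calibration callback above so the
# simulator can observe tensor ranges and fix the quantization encodings
# before the quantized model is evaluated.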
accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda)
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Simulated quantized model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
# AdaRound
from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters
data_loader = ImageNetDataPipeline.get_val_dataloader()
params = AdaroundParameters(
data_loader=data_loader,
num_batches=ADAROUND_NUM_BATCHES,
default_num_iterations=ADAROUND_ITERATIONS,
)
dummy_input = torch.rand(1, 3, 224, 224)
if use_cuda:
dummy_input = dummy_input.cuda()
os.makedirs(f"{output_dir}", exist_ok=True)
ada_model = Adaround.apply_adaround(
model,
dummy_input,
params,
path=f"{output_dir}",
filename_prefix="adaround",
default_param_bw=BIWIDTH,
# default_quant_scheme=QuantScheme.post_training_tf_enhanced
default_quant_scheme=QuantScheme.post_training_tf,
)
# model ready to use
sim = QuantizationSimModel(
model=ada_model,
dummy_input=dummy_input,
# quant_scheme=QuantScheme.post_training_tf_enhanced,
quant_scheme=QuantScheme.post_training_tf,
default_output_bw=BIWIDTH_ACTIVATION,
default_param_bw=BIWIDTH,
)
sim.set_and_freeze_param_encodings(encoding_path=f"{output_dir}adaround.encodings")
sim.compute_encodings(
forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda
)
accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda)
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Quantized (after AdaRound) model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
dummy_input = dummy_input.cpu()
sim.export(
path=output_dir, filename_prefix="resnet18_after_adaround", dummy_input=dummy_input
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559161.ipynb
|
imagenetmini-1000
|
ifigotin
|
[{"Id": 129559161, "ScriptId": 38109697, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8749054, "CreationDate": "05/14/2023 20:37:51", "VersionNumber": 14.0, "Title": "aimnet", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 583.0, "LinesInsertedFromPrevious": 210.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 373.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185741617, "KernelVersionId": 129559161, "SourceDatasetVersionId": 998277}]
|
[{"Id": 998277, "DatasetId": 547506, "DatasourceVersionId": 1026923, "CreatorUserId": 2424380, "LicenseName": "Unknown", "CreationDate": "03/10/2020 01:05:11", "VersionNumber": 1.0, "Title": "ImageNet 1000 (mini)", "Slug": "imagenetmini-1000", "Subtitle": "1000 samples from ImageNet", "Description": "### Context\n\nhttps://github.com/pytorch/examples/tree/master/imagenet\n\n### Acknowledgements\n\nhttps://github.com/pytorch/examples/tree/master/imagenet", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 547506, "CreatorUserId": 2424380, "OwnerUserId": 2424380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 998277.0, "CurrentDatasourceVersionId": 1026923.0, "ForumId": 561077, "Type": 2, "CreationDate": "03/10/2020 01:05:11", "LastActivityDate": "03/10/2020", "TotalViews": 62479, "TotalDownloads": 11891, "TotalVotes": 134, "TotalKernels": 57}]
|
[{"Id": 2424380, "UserName": "ifigotin", "DisplayName": "Ilya Figotin", "RegisterDate": "10/29/2018", "PerformanceTier": 1}]
|
import os
del os.environ["PYTHONPATH"]
import sys
print("Python version")
print(sys.version)
print("Version info.")
print(sys.version_info)
# GPU
#!wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
#!mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
#!wget https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
#!dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
#!apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
#!echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list
#!echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
#!apt-get update
#!apt-get -y install cuda
#!wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
#!apt-get --assume-yes install ./nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
#!apt-get update
# GPU
# CPU
# %%capture
#
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetCommon-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetTorch-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl -f https://download.pytorch.org/whl/torch_stable.html
#!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/Aimet-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl
#!python -m site
#!ls ~/miniconda3/envs/py38/lib/python3.8/site-packages
#! python3 -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])'
#! python -m site
#! cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/reqs_deb_common.txt | xargs apt-get --assume-yes install
# GPU
# GPU
# If you installed the CUDA 11.x drivers
# ln -s /usr/local/cuda-11.0 /usr/local/cuda
# OR if you installed the CUDA 10.x drivers
# ln -s /usr/local/cuda-10.0 /usr/local/cuda
#! echo $PYTHONPATH
# pip freeze
#!cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/envsetup.sh
import os
os.environ[
"LD_LIBRARY_PATH"
] = "/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:/usr/local/cuda/targets/x86_64-linux/lib"
os.environ["CUDA_TOOLKIT_PATH"] = "/usr/local/cuda"
os.environ["CUDNN_INSTALL_PATH"] = "/usr/local/cuda"
os.environ["CUDA_HOME"] = "/usr/local/cuda"
os.environ["NVIDIA_DRIVER_CAPABILITIES"] = "compute,utility"
os.environ["NVIDIA_VISIBLE_DEVICES"] = "all"
os.environ[
"PYTHONPATH"
] = "/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu"
os.environ[
"LD_LIBRARY_PATH"
] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages"
os.environ["LD_LIBRARY_PATH"] += ":/usr/lib/x86_64-linux-gnu/"
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
# os.environ['LD_LIBRARY_PATH']
import sys
sys.path.append("/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common")
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
#! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so
#! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so
#! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common
# # Python
# update image_net_dataloader
"""
Creates data-loader for Image-Net dataset
source: https://github.com/quic/aimet/blob/develop/Examples/torch/utils/image_net_data_loader.py
"""
import logging
import os
from torchvision import transforms
from torchvision.datasets.folder import default_loader, has_file_allowed_extension
from torch.utils.data import Dataset
import torch.utils.data as torch_data
from Examples.common import image_net_config
logger = logging.getLogger("Dataloader")
IMG_EXTENSIONS = ".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif"
def make_dataset(
directory: str, class_to_idx: dict, extensions: tuple, num_samples_per_class: int
) -> list:
"""
Creates a dataset of images with num_samples_per_class images in each class
:param directory: The string path to the data directory.
:param class_to_idx: A dictionary mapping the name of the class to the index (label)
:param extensions: list of valid extensions to load data
:param num_samples_per_class: Number of samples to use per class.
:return: list of images containing the entire dataset.
"""
images = []
num_classes = 0
directory = os.path.expanduser(directory)
for class_name in sorted(class_to_idx.keys()):
class_path = os.path.join(directory, class_name)
if os.path.isdir(class_path):
class_idx = class_to_idx[class_name]
class_images = add_images_for_class(
class_path, extensions, num_samples_per_class, class_idx
)
images.extend(class_images)
num_classes += 1
logger.info("Dataset consists of %d images in %d classes", len(images), num_classes)
return images
def add_images_for_class(
class_path: str, extensions: tuple, num_samples_per_class: int, class_idx: int
) -> list:
"""
For a given class, adds num_samples_per_class images to a list.
:param class_path: The string path to the class directory.
:param extensions: List of valid extensions to load data
:param num_samples_per_class: Number of samples to use per class.
:param class_idx: numerical index of class.
:return: list of images for given class.
"""
class_images = []
count = 0
for file_name in os.listdir(class_path):
if num_samples_per_class and count >= num_samples_per_class:
break
if has_file_allowed_extension(file_name, extensions):
image_path = os.path.join(class_path, file_name)
item = (image_path, class_idx)
class_images.append(item)
count += 1
return class_images
class ImageFolder(Dataset):
"""
Dataset class inspired by torchvision.datasets.folder.DatasetFolder for images organized as
individual files grouped by category.
"""
def __init__(
self,
root: str,
transform=None,
target_transform=None,
num_samples_per_class: int = None,
):
"""
:param root: The path to the data directory.
:param transform: The required processing to be applied on the sample.
:param target_transform: The required processing to be applied on the target.
:param num_samples_per_class: Number of samples to use per class.
"""
Dataset.__init__(self)
classes, class_to_idx = self._find_classes(root)
self.samples = make_dataset(
root, class_to_idx, IMG_EXTENSIONS, num_samples_per_class
)
if not self.samples:
raise (
RuntimeError(
"Found 0 files in sub folders of: {}\nSupported extensions are: {}".format(
root, ",".join(IMG_EXTENSIONS)
)
)
)
self.root = root
self.loader = default_loader
self.extensions = IMG_EXTENSIONS
self.classes = classes
self.class_to_idx = class_to_idx
self.targets = [s[1] for s in self.samples]
self.transform = transform
self.target_transform = target_transform
self.imgs = self.samples
@staticmethod
def _find_classes(directory: str):
classes = [
d
for d in os.listdir(directory)
if os.path.isdir(os.path.join(directory, d))
]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index: int):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class ImageNetDataLoader:
"""
For loading Validation data from the ImageNet dataset.
"""
def __init__(
self,
images_dir: str,
image_size: int,
batch_size: int = 128,
is_training: bool = False,
num_workers: int = 8,
num_samples_per_class: int = None,
):
"""
:param images_dir: The path to the data directory
:param image_size: The length of the image
:param batch_size: The batch size to use for training and validation
:param is_training: Indicates whether to load the training or validation data
:param num_workers: Indiicates to the data loader how many sub-processes to use for data loading.
:param num_samples_per_class: Number of samples to use per class.
"""
# For normalization, mean and std dev values are calculated per channel
# and can be found on the web.
normalize = transforms.Normalize(
mean=image_net_config.dataset["images_mean"],
std=image_net_config.dataset["images_std"],
)
self.train_transforms = transforms.Compose(
[
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
self.val_transforms = transforms.Compose(
[
transforms.Resize(image_size + 24),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
]
)
if is_training:
data_set = ImageFolder(
root=os.path.join(images_dir, "train"),
transform=self.train_transforms,
num_samples_per_class=num_samples_per_class,
)
else:
data_set = ImageFolder(
root=os.path.join(images_dir, "val"),
transform=self.val_transforms,
num_samples_per_class=num_samples_per_class,
)
self._data_loader = torch_data.DataLoader(
data_set,
batch_size=batch_size,
shuffle=is_training,
num_workers=num_workers,
)
@property
def data_loader(self) -> torch_data.DataLoader:
"""
Returns the data-loader
"""
return self._data_loader
# adaround script
ROOT_DATA_AND_OUTPUTS = "./"
N = 16 # number of classes and samples per class
BIWIDTH = 4
BIWIDTH_ACTIVATION = 8 # quantization on the input and output of the layer
DATASET_FOLDER_PATH = "../input/imagenetmini-1000/imagenet-mini/"
# batch size of evaluation is 32
# it is recommended to have ~2000 images
# this means we need 62 batches
# in the paper they used 2048 images -> 64 batches !!!!!!!!!
ADAROUND_NUM_BATCHES = 64
ADAROUND_ITERATIONS = 20000
# ADAROUND_NUM_BATCHES = 16
# ADAROUND_ITERATIONS = 10000
# in paper their claim to have accuracy ~68.6%
import datetime
DATASET_DIR = f"{ROOT_DATA_AND_OUTPUTS}{DATASET_FOLDER_PATH}"
output_dir = (
f'{ROOT_DATA_AND_OUTPUTS}output_{datetime.datetime.now().strftime("%Y%m%d_%H%M")}/'
)
import os
os.makedirs(output_dir, exist_ok=True)
import os
import torch
from Examples.common import image_net_config
from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator
# from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader
from imagenet_dataloader import ImageNetDataLoader
class ImageNetDataPipeline:
@staticmethod
def get_val_dataloader() -> torch.utils.data.DataLoader:
"""
Instantiates a validation dataloader for ImageNet dataset and returns it
"""
data_loader = ImageNetDataLoader(
DATASET_DIR,
image_size=image_net_config.dataset["image_size"],
batch_size=image_net_config.evaluation["batch_size"],
is_training=False,
num_workers=image_net_config.evaluation["num_workers"],
num_samples_per_class=N,
).data_loader
return data_loader
@staticmethod
def evaluate(model: torch.nn.Module, use_cuda: bool) -> float:
"""
Given a torch model, evaluates its Top-1 accuracy on the dataset
:param model: the model to evaluate
:param use_cuda: whether or not the GPU should be used.
"""
evaluator = ImageNetEvaluator(
DATASET_DIR,
image_size=image_net_config.dataset["image_size"],
batch_size=image_net_config.evaluation["batch_size"],
num_workers=image_net_config.evaluation["num_workers"],
num_val_samples_per_class=N,
)
return evaluator.evaluate(model, iterations=None, use_cuda=use_cuda)
from torchvision.models import resnet18
model = resnet18(pretrained=True)
# model preperation
from aimet_torch.model_preparer import prepare_model
model = prepare_model(model)
# move to device
use_cuda = False
if torch.cuda.is_available():
use_cuda = True
model.to(torch.device("cuda"))
print("Using cuda: {}".format(use_cuda))
# accuracy of the original model
accuracy = ImageNetDataPipeline.evaluate(model, use_cuda)
from termcolor import colored
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Original model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
from aimet_torch.batch_norm_fold import fold_all_batch_norms
_ = fold_all_batch_norms(model, input_shapes=(1, 3, 224, 224))
from aimet_common.defs import QuantScheme
from aimet_torch.quantsim import QuantizationSimModel
dummy_input = torch.rand(
1, 3, 224, 224
) # Shape for each ImageNet sample is (3 channels) x (224 height) x (224 width)
if use_cuda:
dummy_input = dummy_input.cuda()
# https://arxiv.org/pdf/2201.08442.pdf
# in the paper they use min-max ----> quant_scheme=QuantScheme.post_training_tf
sim = QuantizationSimModel(
model=model,
# quant_scheme=QuantScheme.post_training_tf_enhanced,
quant_scheme=QuantScheme.post_training_tf,
dummy_input=dummy_input,
default_output_bw=BIWIDTH_ACTIVATION,
default_param_bw=BIWIDTH,
)
def pass_calibration_data(sim_model, use_cuda):
data_loader = ImageNetDataPipeline.get_val_dataloader()
batch_size = data_loader.batch_size
if use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
sim_model.eval()
samples = 1000
batch_cntr = 0
with torch.no_grad():
for input_data, target_data in data_loader:
inputs_batch = input_data.to(device)
sim_model(inputs_batch)
batch_cntr += 1
if (batch_cntr * batch_size) > samples:
break
sim.compute_encodings(
forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda
)
accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda)
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Simulated quantized model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
# AdaRound
from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters
data_loader = ImageNetDataPipeline.get_val_dataloader()
params = AdaroundParameters(
data_loader=data_loader,
num_batches=ADAROUND_NUM_BATCHES,
default_num_iterations=ADAROUND_ITERATIONS,
)
dummy_input = torch.rand(1, 3, 224, 224)
if use_cuda:
dummy_input = dummy_input.cuda()
os.makedirs(f"{output_dir}", exist_ok=True)
ada_model = Adaround.apply_adaround(
model,
dummy_input,
params,
path=f"{output_dir}",
filename_prefix="adaround",
default_param_bw=BIWIDTH,
# default_quant_scheme=QuantScheme.post_training_tf_enhanced
default_quant_scheme=QuantScheme.post_training_tf,
)
# model ready to use
sim = QuantizationSimModel(
model=ada_model,
dummy_input=dummy_input,
# quant_scheme=QuantScheme.post_training_tf_enhanced,
quant_scheme=QuantScheme.post_training_tf,
default_output_bw=BIWIDTH_ACTIVATION,
default_param_bw=BIWIDTH,
)
sim.set_and_freeze_param_encodings(encoding_path=f"{output_dir}adaround.encodings")
sim.compute_encodings(
forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda
)
accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda)
print(
colored(
"###########################################################################################################",
"green",
)
)
print(colored(f"Quantized (after AdaRound) model accuracy: {accuracy}", "red"))
print(
colored(
"###########################################################################################################",
"green",
)
)
dummy_input = dummy_input.cpu()
sim.export(
path=output_dir, filename_prefix="resnet18_after_adaround", dummy_input=dummy_input
)
| false | 0 | 5,363 | 0 | 5,416 | 5,363 |
||
129559323
|
<jupyter_start><jupyter_text>Iris Species
The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].
It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.
The columns in this dataset are:
- Id
- SepalLengthCm
- SepalWidthCm
- PetalLengthCm
- PetalWidthCm
- Species
[](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)
[1]: http://archive.ics.uci.edu/ml/
Kaggle dataset identifier: iris
<jupyter_script>import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.metrics import *
from sklearn.model_selection import *
from sklearn.impute import KNNImputer
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
RandomForestClassifier,
ExtraTreesClassifier,
VotingClassifier,
)
from xgboost import XGBClassifier
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
df.head(20)
df.shape
df["Species"].value_counts()
df.corr()
df1 = df.drop(["Id"], axis=1)
df1.head()
sns.pairplot(df1, hue="Species")
sns.boxplot(data=df, x="Species", y="PetalWidthCm")
sns.boxplot(data=df, x="Species", y="SepalLengthCm")
sns.violinplot(data=df, x="Species", y="SepalWidthCm")
sns.histplot(data=df, x="SepalWidthCm", hue="Species", fill=True)
sns.kdeplot(
data=df, x="SepalWidthCm", y="PetalWidthCm", hue="Species", fill=True, alpha=0.5
)
y = df["Species"]
X = df.drop(["Species"], axis=1)
model = LogisticRegression(solver="liblinear")
model.fit(X, y)
y_hat = model.predict(X)
accuracy_score(y, y_hat)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestClassifier(max_depth=5, n_estimators=500)
model.fit(X_train, y_train)
y_hat = model.predict(X_test)
accuracy_score(y_test, y_hat)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559323.ipynb
|
iris
| null |
[{"Id": 129559323, "ScriptId": 38523170, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297692, "CreationDate": "05/14/2023 20:40:26", "VersionNumber": 1.0, "Title": "notebook3613bce99b", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185741835, "KernelVersionId": 129559323, "SourceDatasetVersionId": 420}]
|
[{"Id": 420, "DatasetId": 19, "DatasourceVersionId": 420, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2016 07:38:05", "VersionNumber": 2.0, "Title": "Iris Species", "Slug": "iris", "Subtitle": "Classify iris plants into three species in this classic dataset", "Description": "The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].\n\nIt includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.\n\nThe columns in this dataset are:\n\n - Id\n - SepalLengthCm\n - SepalWidthCm\n - PetalLengthCm\n - PetalWidthCm\n - Species\n\n[](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)\n\n\n [1]: http://archive.ics.uci.edu/ml/", "VersionNotes": "Republishing files so they're formally in our system", "TotalCompressedBytes": 15347.0, "TotalUncompressedBytes": 15347.0}]
|
[{"Id": 19, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 420.0, "CurrentDatasourceVersionId": 420.0, "ForumId": 997, "Type": 2, "CreationDate": "01/12/2016 00:33:31", "LastActivityDate": "02/06/2018", "TotalViews": 1637863, "TotalDownloads": 423540, "TotalVotes": 3416, "TotalKernels": 6420}]
| null |
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.metrics import *
from sklearn.model_selection import *
from sklearn.impute import KNNImputer
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
RandomForestClassifier,
ExtraTreesClassifier,
VotingClassifier,
)
from xgboost import XGBClassifier
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
df.head(20)
df.shape
df["Species"].value_counts()
df.corr()
df1 = df.drop(["Id"], axis=1)
df1.head()
sns.pairplot(df1, hue="Species")
sns.boxplot(data=df, x="Species", y="PetalWidthCm")
sns.boxplot(data=df, x="Species", y="SepalLengthCm")
sns.violinplot(data=df, x="Species", y="SepalWidthCm")
sns.histplot(data=df, x="SepalWidthCm", hue="Species", fill=True)
sns.kdeplot(
data=df, x="SepalWidthCm", y="PetalWidthCm", hue="Species", fill=True, alpha=0.5
)
y = df["Species"]
X = df.drop(["Species"], axis=1)
model = LogisticRegression(solver="liblinear")
model.fit(X, y)
y_hat = model.predict(X)
accuracy_score(y, y_hat)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestClassifier(max_depth=5, n_estimators=500)
model.fit(X_train, y_train)
y_hat = model.predict(X_test)
accuracy_score(y_test, y_hat)
| false | 0 | 492 | 0 | 791 | 492 |
||
129903911
|
<jupyter_start><jupyter_text>Air Passenger Data for Time Series Analysis
### Context
This data is used for making ARIMA model forecasting.
### Content
This contains the increasing rate of passenger
Kaggle dataset identifier: air-passenger-data-for-time-series-analysis
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load Dataset
df = pd.read_csv(
"/kaggle/input/air-passenger-data-for-time-series-analysis/AirPassengers.csv"
)
df
# # Exploratory Data Analysis (EDA)
# ## View Dataset Description
df["Month"] = pd.to_datetime(df["Month"], format="%Y-%m")
# df['Month'] = df['Month'].dt.strftime('%Y-%m')
df.info()
df
# ## Change Data Index
df.set_index("Month", inplace=True)
# ## Data Visualization
import matplotlib.pyplot as plt
import datetime
plt.figure(figsize=(12, 6))
plt.plot(df)
plt.xlabel("Time")
# plt.xticks(rotation=45)
plt.ylabel("Num of Passengers")
plt.title("US Airline Num of Passengers Trend 1949 - 1960")
plt.show()
# There is a positive trend with some repetitive pattern
# # Time Series Decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
# ## Additive Decomposition
additive_dec = seasonal_decompose(df, model="additive", period=30)
plt.figure(figsize=(12, 8))
additive_dec.plot()
plt.suptitle("Additive Decomposition", fontsize=12)
plt.tight_layout()
plt.show()
multiplicative_dec = seasonal_decompose(df, model="multiplicative", period=30)
plt.figure(figsize=(12, 8))
multiplicative_dec.plot()
plt.suptitle("Multiplicative Decomposition", fontsize=12)
plt.tight_layout()
plt.show()
# The residuals of the additive decomposition still show a clear pattern, while the residuals of the multiplicative decomposition look fairly random.
# # Stationary Test for Time Series
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.graphics.tsaplots import plot_acf
# ## Augmented Dickey Fuller Test (ADF Test)
# H0: time series data is non-stationary
# H1: time series data is stationary
# A p-value < 0.05 rejects the null hypothesis (H0), i.e. the series is stationary
result = adfuller(df.values, autolag="AIC")
print(f"ADF Statistic: {result[0]}")
print(f"p-value: {result[1]}")
# ## KPSS Test
# H0: time series data is stationary
# H1: time series data is non-stationary
# A p-value < 0.05 rejects the null hypothesis (H0), i.e. the series is non-stationary
result = kpss(df)
print("KPSS Statistic:", result[0])
print("p-value:", result[1])
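# A small helper (sketch, assuming the usual 5% significance level) that turns
# the two p-values into a single verdict: ADF rejects non-stationarity when
# p < 0.05, while KPSS rejects stationarity when p < 0.05.
def stationarity_verdict(adf_p_value, kpss_p_value, alpha=0.05):
    adf_stationary = adf_p_value < alpha
    kpss_stationary = kpss_p_value >= alpha
    if adf_stationary and kpss_stationary:
        return "stationary"
    if not adf_stationary and not kpss_stationary:
        return "non-stationary"
    return "inconclusive (the two tests disagree)"


print(
    "Verdict:",
    stationarity_verdict(adfuller(df.values, autolag="AIC")[1], kpss(df)[1]),
)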
# ## Rolling Test
# plt.plot(df['Month'], df['#Passengers'])
rolling_mean = df.rolling(6).mean()
rolling_std = df.rolling(6).std()
plt.plot(df, label="Passenger Data")
plt.plot(rolling_mean, color="red", label="Rolling Num of Passenger Mean")
plt.plot(
rolling_std, color="green", label="Rolling Passenger Number Standard Deviation"
)
plt.xlabel("Time")
plt.title("Passenger Time Series, Rolling Mean, Standard Deviation")
plt.legend(loc="best")
plt.show()
# From the two test results above, we can see that the current data is non-stationary
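# As a quick follow-up (sketch): difference the series once and re-run the ADF
# test; a p-value below 0.05 on the differenced series would indicate that one
# round of differencing is enough to remove the trend.
diff_once = df["#Passengers"].diff().dropna()
adf_stat_d1, adf_p_d1 = adfuller(diff_once.values, autolag="AIC")[:2]
print(f"ADF on 1st difference: statistic={adf_stat_d1:.3f}, p-value={adf_p_d1:.4f}")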
# # Find ARIMA Order Term
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
# ## ARIMA d Parameter
plt.rcParams.update({"figure.figsize": (12, 8), "figure.dpi": 120})
# Original Series
fig, axes = plt.subplots(3, 2, sharex=False)
axes[0, 0].plot(df)
axes[0, 0].set_title("Original Series")
plot_acf(df.values, ax=axes[0, 1])
# 1st Differencing
axes[1, 0].plot(df.diff())
axes[1, 0].set_title("1st Order Differencing")
plot_acf(df.diff().dropna(), ax=axes[1, 1])
# 2nd Differencing
axes[2, 0].plot(df.diff().diff())
axes[2, 0].set_title("2nd Order Differencing")
plot_acf(df.diff().diff().dropna(), ax=axes[2, 1])
plt.tight_layout()
plt.show()
# From the visualization above, we can see that at the 2nd order of differencing the first lag drops well into the negative side, which means the series becomes over-differenced at order 2. So we select d = 1
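# Cross-check of the chosen d with pmdarima's ndiffs estimator (sketch;
# pmdarima is also used further below for auto_arima, so it is assumed to be
# available in this environment).
from pmdarima.arima.utils import ndiffs
print("ndiffs suggested by ADF test :", ndiffs(df["#Passengers"], test="adf"))
print("ndiffs suggested by KPSS test:", ndiffs(df["#Passengers"], test="kpss"))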
# ### AR Term (p)
plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120})
fig, axes = plt.subplots(2, 2, sharex=False)
# PACF plot of 1st differenced series
axes[0, 0].plot(df.diff())
axes[0, 0].set_title("1st Differencing")
axes[0, 1].set(ylim=(0, 5))
plot_pacf(df.diff().dropna(), ax=axes[0, 1])
# PACF plot of 2nd differenced series
axes[1, 0].plot(df.diff().diff())
axes[1, 0].set_title("2nd Differencing")
axes[1, 1].set(ylim=(0, 5))
plot_pacf(df.diff().diff().dropna(), ax=axes[1, 1])
plt.tight_layout()
plt.show()
# Here we can see that for the 1st differencing, the first PACF lag is above the significance limit. So we select p = 1
# ### MA Term (q)
plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120})
fig, axes = plt.subplots(2, 2, sharex=False)
# ACF plot of 1st differenced series
axes[0, 0].plot(df.diff())
axes[0, 0].set_title("1st Differencing")
axes[0, 1].set(ylim=(0, 5))
plot_acf(df.diff().dropna(), ax=axes[0, 1])
# ACF plot of 2nd differenced series
axes[1, 0].plot(df.diff().diff())
axes[1, 0].set_title("2nd Differencing")
axes[1, 1].set(ylim=(0, 5))
plot_acf(df.diff().diff().dropna(), ax=axes[1, 1])
plt.tight_layout()
plt.show()
# For MA term, select q = 2
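# A quick sanity check (sketch only): fit a handful of candidate (p, d, q)
# orders on the full series and compare their AIC values; lower AIC is
# generally preferred. The candidate list below is an arbitrary small set.
import warnings
from statsmodels.tsa.arima.model import ARIMA as _ARIMA
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    for candidate_order in [(1, 1, 1), (1, 1, 2), (1, 2, 2), (2, 1, 2)]:
        candidate_aic = _ARIMA(df["#Passengers"], order=candidate_order).fit().aic
        print(candidate_order, "AIC:", round(candidate_aic, 2))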
# ## Splitting Data
# train = dfs2.loc[:'1959']
# test = dfs2.loc['1960':]
# # train.rename({'#Passenger':''})
# # Model Building
from statsmodels.tsa.arima.model import ARIMA
Arima = ARIMA(df, order=(1, 2, 2))
Ar = Arima.fit()
Ar.summary()
# Plot residual errors
residuals = pd.DataFrame(Ar.resid)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
# plt.figure()
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind="kde", title="Density", ax=ax[1])
plt.show()
# # Evaluation
from statsmodels.graphics.tsaplots import plot_predict
def forecast_accuracy(forecast, actual):
mape = np.mean(np.abs(forecast - actual) / np.abs(actual)) # MAPE
# me = np.mean(forecast - actual) # ME
mae = np.mean(np.abs(forecast - actual)) # MAE
# mpe = np.mean((forecast - actual)/actual) # MPE
rmse = np.mean((forecast - actual) ** 2) ** 0.5 # RMSE
corr = np.corrcoef(forecast, actual)[0, 1] # corr
# mins = np.amin(np.hstack([forecast[:,None],
# actual[:,None]]), axis=1)
# maxs = np.amax(np.hstack([forecast[:,None],
# actual[:,None]]), axis=1)
# minmax = 1 - np.mean(mins/maxs) # minmax
# acf1 = acf(fc-test)[1] # ACF1
return {"mape": mape, "mae": mae, "rmse": rmse}
# Create Training and Test
train = df.loc[:"1955"]
test = df.loc["1956":]
# Re-train ARIMA
model = ARIMA(train, order=(1, 1, 1))
Ar = model.fit()
# Forecast
fc = Ar.get_forecast(60).summary_frame()
# fc_conf_int = fc.conf_int(alpha=0.05)
fc = pd.DataFrame(fc).rename({"mean": "#Passengers"}, axis=1)
# fc
fig, ax = plt.subplots(figsize=(12, 6))
plt.plot(train, label="train time series")
plt.plot(test, label="test time series")
# plot_predict(Ar, '1956', '1962', ax=ax)
plt.plot(fc["#Passengers"])
plt.fill_between(
fc.index, fc["mean_ci_lower"], fc["mean_ci_upper"], color="k", alpha=0.15
)
plt.legend(loc="upper left")
plt.show()
# fc['#Passengers'].values
# test
# len(test)
forecast_accuracy(fc["#Passengers"], test["#Passengers"])
# ## Auto ARIMA
import pmdarima as pm
model = pm.auto_arima(
train.values,
start_p=1,
start_q=1,
test="adf", # use adftest to find optimal 'd'
max_p=5,
max_q=5, # maximum p and q
m=1, # frequency of series
d=None, # let model determine 'd'
seasonal=False, # No Seasonality
start_P=0,
D=0,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
print(model.summary())
# Forecast
n_periods = 60
fitted, confint = model.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(train)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("Auto ARIMA")
plt.show()
# # Plot
# plt.plot(dfs2)
# plt.plot(fc, color='darkgreen')
# plt.fill_between(lower_series.index,
# lower_series,
# upper_series,
# color='k', alpha=.15)
# plt.title("SARIMA - Final Forecast of a10 - Drug Sales")
# plt.show()
# ## SARIMA
# Seasonal - fit stepwise auto-ARIMA
smodel = pm.auto_arima(
train,
start_p=1,
start_q=1,
test="adf",
max_p=5,
max_q=5,
m=12,
start_P=0,
seasonal=True,
d=None,
D=1,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
smodel.summary()
# Forecast
n_periods = 60
fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(train)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("Auto ARIMA")
plt.show()
# Arima = ARIMA(df, order=(0,1,1), seasonal_order=(2,1,[],12))
# Ar = Arima.fit()
# Ar.summary()
# Forecast
fc = Ar.forecast(24, alpha=0.05) # 95% conf
fc = pd.DataFrame(fc).rename({"predicted_mean": "#Passengers"}, axis=1)
fig, ax = plt.subplots(figsize=(12, 6))
ax = df.plot(ax=ax, label="train time series")
ax.set_label("sdf")
ax = test.plot(ax=ax, label="test time series")
# plot_predict(Ar, '1959', '1961', ax=ax)
ax = fc.plot(ax=ax)
ax = plt.legend(loc="best")
plt.show()
print("mape", np.mean(np.abs(fc - test) / np.abs(test))) # MAPE)
print("rmse", np.mean((fc - test) ** 2) ** 0.5) # RMSE
fig, ax = plt.subplots(figsize=(12, 6))
ax = df.plot(ax=ax, label="train time series")
ax.set_label("sdf")
# ax = test.plot(ax=ax, label='test time series')
# plot_predict(Ar, '1959', '1961', ax=ax)
ax = fc.plot(ax=ax)
ax = plt.legend(loc="best")
plt.show()
n_periods = 24
fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(df.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(df)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("SARIMA - Final Forecast of a10 - Drug Sales")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/903/129903911.ipynb
|
air-passenger-data-for-time-series-analysis
|
ashfakyeafi
|
[{"Id": 129903911, "ScriptId": 38534427, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6654637, "CreationDate": "05/17/2023 10:23:28", "VersionNumber": 5.0, "Title": "Airline Passenger Forecasting using ARIMA", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 393.0, "LinesInsertedFromPrevious": 160.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 233.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186318097, "KernelVersionId": 129903911, "SourceDatasetVersionId": 2504188}]
|
[{"Id": 2504188, "DatasetId": 1516462, "DatasourceVersionId": 2546888, "CreatorUserId": 5154008, "LicenseName": "CC0: Public Domain", "CreationDate": "08/06/2021 14:46:29", "VersionNumber": 1.0, "Title": "Air Passenger Data for Time Series Analysis", "Slug": "air-passenger-data-for-time-series-analysis", "Subtitle": "There is a list of passenger data from year 1949 to 1960", "Description": "### Context\n\nThis data is used for making ARIMA model forecasting.\n\n\n### Content\n\nThis contains the increasing rate of passenger\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1516462, "CreatorUserId": 5154008, "OwnerUserId": 5154008.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2504188.0, "CurrentDatasourceVersionId": 2546888.0, "ForumId": 1536251, "Type": 2, "CreationDate": "08/06/2021 14:46:29", "LastActivityDate": "08/06/2021", "TotalViews": 11264, "TotalDownloads": 1480, "TotalVotes": 43, "TotalKernels": 9}]
|
[{"Id": 5154008, "UserName": "ashfakyeafi", "DisplayName": "Ashfak Yeafi", "RegisterDate": "05/24/2020", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load Dataset
df = pd.read_csv(
"/kaggle/input/air-passenger-data-for-time-series-analysis/AirPassengers.csv"
)
df
# # Exploratory Data Analysis (EDA)
# ## View Dataset Description
df["Month"] = pd.to_datetime(df["Month"], format="%Y-%m")
# df['Month'] = df['Month'].dt.strftime('%Y-%m')
df.info()
df
# ## Change Data Index
df.set_index("Month", inplace=True)
# ## Data Visualization
import matplotlib.pyplot as plt
import datetime
plt.figure(figsize=(12, 6))
plt.plot(df)
plt.xlabel("Time")
# plt.xticks(rotation=45)
plt.ylabel("Num of Passengers")
plt.title("US Airline Num of Passengers Trend 1949 - 1960")
plt.show()
# There is a positive trend with some repetitive pattern
# # Time Series Decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
# ## Additive Decomposition
additive_dec = seasonal_decompose(df, model="additive", period=30)
plt.figure(figsize=(12, 8))
additive_dec.plot()
plt.suptitle("Additive Decomposition", fontsize=12)
plt.tight_layout()
plt.show()
multiplicative_dec = seasonal_decompose(df, model="multiplicative", period=30)
plt.figure(figsize=(12, 8))
multiplicative_dec.plot()
plt.suptitle("Multiplicative Decomposition", fontsize=12)
plt.tight_layout()
plt.show()
# The residuals of the additive decomposition still show a clear pattern, while the residuals of the multiplicative decomposition look fairly random.
# # Stationary Test for Time Series
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.graphics.tsaplots import plot_acf
# ## Augmented Dickey Fuller Test (ADF Test)
# H0: time series data is non-stationary
# H1: time series data is stationary
# A p-value < 0.05 rejects the null hypothesis (H0), i.e. the series is stationary
result = adfuller(df.values, autolag="AIC")
print(f"ADF Statistic: {result[0]}")
print(f"p-value: {result[1]}")
# ## KPSS Test
# H0: time series data is stationary
# H1: time series data is non-stationary
# A p-value < 0.05 rejects the null hypothesis (H0), i.e. the series is non-stationary
result = kpss(df)
print("KPSS Statistic:", result[0])
print("p-value:", result[1])
# ## Rolling Test
# plt.plot(df['Month'], df['#Passengers'])
rolling_mean = df.rolling(6).mean()
rolling_std = df.rolling(6).std()
plt.plot(df, label="Passenger Data")
plt.plot(rolling_mean, color="red", label="Rolling Num of Passenger Mean")
plt.plot(
rolling_std, color="green", label="Rolling Passenger Number Standard Deviation"
)
plt.xlabel("Time")
plt.title("Passenger Time Series, Rolling Mean, Standard Deviation")
plt.legend(loc="best")
plt.show()
# From the two test results above, we can see that the current data is non-stationary
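# As a quick follow-up (sketch): difference the series once and re-run the ADF
# test; a p-value below 0.05 on the differenced series would indicate that one
# round of differencing is enough to remove the trend.
diff_once = df["#Passengers"].diff().dropna()
adf_stat_d1, adf_p_d1 = adfuller(diff_once.values, autolag="AIC")[:2]
print(f"ADF on 1st difference: statistic={adf_stat_d1:.3f}, p-value={adf_p_d1:.4f}")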
# # Find ARIMA Order Term
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
# ## ARIMA d Parameter
plt.rcParams.update({"figure.figsize": (12, 8), "figure.dpi": 120})
# Original Series
fig, axes = plt.subplots(3, 2, sharex=False)
axes[0, 0].plot(df)
axes[0, 0].set_title("Original Series")
plot_acf(df.values, ax=axes[0, 1])
# 1st Differencing
axes[1, 0].plot(df.diff())
axes[1, 0].set_title("1st Order Differencing")
plot_acf(df.diff().dropna(), ax=axes[1, 1])
# 2nd Differencing
axes[2, 0].plot(df.diff().diff())
axes[2, 0].set_title("2nd Order Differencing")
plot_acf(df.diff().diff().dropna(), ax=axes[2, 1])
plt.tight_layout()
plt.show()
# From the visualization above, we can see that at the 2nd order of differencing the first lag drops well into the negative side, which means the series becomes over-differenced at order 2. So we select d = 1
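# Cross-check of the chosen d with pmdarima's ndiffs estimator (sketch;
# pmdarima is also used further below for auto_arima, so it is assumed to be
# available in this environment).
from pmdarima.arima.utils import ndiffs
print("ndiffs suggested by ADF test :", ndiffs(df["#Passengers"], test="adf"))
print("ndiffs suggested by KPSS test:", ndiffs(df["#Passengers"], test="kpss"))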
# ### AR Term (p)
plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120})
fig, axes = plt.subplots(2, 2, sharex=False)
# PACF plot of 1st differenced series
axes[0, 0].plot(df.diff())
axes[0, 0].set_title("1st Differencing")
axes[0, 1].set(ylim=(0, 5))
plot_pacf(df.diff().dropna(), ax=axes[0, 1])
# PACF plot of 2nd differenced series
axes[1, 0].plot(df.diff().diff())
axes[1, 0].set_title("2nd Differencing")
axes[1, 1].set(ylim=(0, 5))
plot_pacf(df.diff().diff().dropna(), ax=axes[1, 1])
plt.tight_layout()
plt.show()
# Here we can see that for the 1st differencing, the first PACF lag is above the significance limit. So we select p = 1
# ### MA Term (q)
plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120})
fig, axes = plt.subplots(2, 2, sharex=False)
# ACF plot of 1st differenced series
axes[0, 0].plot(df.diff())
axes[0, 0].set_title("1st Differencing")
axes[0, 1].set(ylim=(0, 5))
plot_acf(df.diff().dropna(), ax=axes[0, 1])
# ACF plot of 2nd differenced series
axes[1, 0].plot(df.diff().diff())
axes[1, 0].set_title("2nd Differencing")
axes[1, 1].set(ylim=(0, 5))
plot_acf(df.diff().diff().dropna(), ax=axes[1, 1])
plt.tight_layout()
plt.show()
# For MA term, select q = 2
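# A quick sanity check (sketch only): fit a handful of candidate (p, d, q)
# orders on the full series and compare their AIC values; lower AIC is
# generally preferred. The candidate list below is an arbitrary small set.
import warnings
from statsmodels.tsa.arima.model import ARIMA as _ARIMA
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    for candidate_order in [(1, 1, 1), (1, 1, 2), (1, 2, 2), (2, 1, 2)]:
        candidate_aic = _ARIMA(df["#Passengers"], order=candidate_order).fit().aic
        print(candidate_order, "AIC:", round(candidate_aic, 2))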
# ## Splitting Data
# train = dfs2.loc[:'1959']
# test = dfs2.loc['1960':]
# # train.rename({'#Passenger':''})
# # Model Building
from statsmodels.tsa.arima.model import ARIMA
Arima = ARIMA(df, order=(1, 2, 2))
Ar = Arima.fit()
Ar.summary()
# Plot residual errors
residuals = pd.DataFrame(Ar.resid)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
# plt.figure()
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind="kde", title="Density", ax=ax[1])
plt.show()
# # Evaluation
from statsmodels.graphics.tsaplots import plot_predict
def forecast_accuracy(forecast, actual):
mape = np.mean(np.abs(forecast - actual) / np.abs(actual)) # MAPE
# me = np.mean(forecast - actual) # ME
mae = np.mean(np.abs(forecast - actual)) # MAE
# mpe = np.mean((forecast - actual)/actual) # MPE
rmse = np.mean((forecast - actual) ** 2) ** 0.5 # RMSE
corr = np.corrcoef(forecast, actual)[0, 1] # corr
# mins = np.amin(np.hstack([forecast[:,None],
# actual[:,None]]), axis=1)
# maxs = np.amax(np.hstack([forecast[:,None],
# actual[:,None]]), axis=1)
# minmax = 1 - np.mean(mins/maxs) # minmax
# acf1 = acf(fc-test)[1] # ACF1
return {"mape": mape, "mae": mae, "rmse": rmse}
# Create Training and Test
train = df.loc[:"1955"]
test = df.loc["1956":]
# Re-train ARIMA
model = ARIMA(train, order=(1, 1, 1))
Ar = model.fit()
# Forecast
fc = Ar.get_forecast(60).summary_frame()
# fc_conf_int = fc.conf_int(alpha=0.05)
fc = pd.DataFrame(fc).rename({"mean": "#Passengers"}, axis=1)
# fc
fig, ax = plt.subplots(figsize=(12, 6))
plt.plot(train, label="train time series")
plt.plot(test, label="test time series")
# plot_predict(Ar, '1956', '1962', ax=ax)
plt.plot(fc["#Passengers"])
plt.fill_between(
fc.index, fc["mean_ci_lower"], fc["mean_ci_upper"], color="k", alpha=0.15
)
plt.legend(loc="upper left")
plt.show()
# fc['#Passengers'].values
# test
# len(test)
forecast_accuracy(fc["#Passengers"], test["#Passengers"])
# ## Auto ARIMA
import pmdarima as pm
model = pm.auto_arima(
train.values,
start_p=1,
start_q=1,
test="adf", # use adftest to find optimal 'd'
max_p=5,
max_q=5, # maximum p and q
m=1, # frequency of series
d=None, # let model determine 'd'
seasonal=False, # No Seasonality
start_P=0,
D=0,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
print(model.summary())
# Forecast
n_periods = 60
fitted, confint = model.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(train)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("Auto ARIMA")
plt.show()
# # Plot
# plt.plot(dfs2)
# plt.plot(fc, color='darkgreen')
# plt.fill_between(lower_series.index,
# lower_series,
# upper_series,
# color='k', alpha=.15)
# plt.title("SARIMA - Final Forecast of a10 - Drug Sales")
# plt.show()
# ## SARIMA
# Seasonal - fit stepwise auto-ARIMA
smodel = pm.auto_arima(
train,
start_p=1,
start_q=1,
test="adf",
max_p=5,
max_q=5,
m=12,
start_P=0,
seasonal=True,
d=None,
D=1,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
smodel.summary()
# Forecast
n_periods = 60
fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(train)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("Auto ARIMA")
plt.show()
# Arima = ARIMA(df, order=(0,1,1), seasonal_order=(2,1,[],12))
# Ar = Arima.fit()
# Ar.summary()
# Forecast
fc = Ar.forecast(24, alpha=0.05) # 95% conf
fc = pd.DataFrame(fc).rename({"predicted_mean": "#Passengers"}, axis=1)
fig, ax = plt.subplots(figsize=(12, 6))
ax = df.plot(ax=ax, label="train time series")
ax.set_label("sdf")
ax = test.plot(ax=ax, label="test time series")
# plot_predict(Ar, '1959', '1961', ax=ax)
ax = fc.plot(ax=ax)
ax = plt.legend(loc="best")
plt.show()
print("mape", np.mean(np.abs(fc - test) / np.abs(test))) # MAPE)
print("rmse", np.mean((fc - test) ** 2) ** 0.5) # RMSE
fig, ax = plt.subplots(figsize=(12, 6))
ax = df.plot(ax=ax, label="train time series")
ax.set_label("sdf")
# ax = test.plot(ax=ax, label='test time series')
# plot_predict(Ar, '1959', '1961', ax=ax)
ax = fc.plot(ax=ax)
ax = plt.legend(loc="best")
plt.show()
n_periods = 24
fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(df.index[-1], periods=n_periods, freq="MS")
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(df)
plt.plot(fitted_series, color="darkgreen")
plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15)
plt.title("SARIMA - Final Forecast of a10 - Drug Sales")
plt.show()
| false | 1 | 4,064 | 0 | 4,132 | 4,064 |
||
129903554
|
<jupyter_start><jupyter_text>Intel Image Classification
### Context
This is image data of Natural Scenes around the world.
### Content
This Data contains around 25k images of size 150x150 distributed under 6 categories.
{'buildings' -> 0,
'forest' -> 1,
'glacier' -> 2,
'mountain' -> 3,
'sea' -> 4,
'street' -> 5 }
The Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.
This data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.
Kaggle dataset identifier: intel-image-classification
<jupyter_script># ## Setup
from google.colab import drive
drive.mount("/content/drive")
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from tensorflow.keras.utils import img_to_array
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# ## Paths
base_dir = "/content/drive/MyDrive/Image classification/dataset"
train_dir = "/content/drive/MyDrive/Image classification/dataset/train"
train_street_dir = "/content/drive/MyDrive/Image classification/dataset/train/street"
train_sea_dir = "/content/drive/MyDrive/Image classification/dataset/train/sea"
train_mountain_dir = (
"/content/drive/MyDrive/Image classification/dataset/train/mountain"
)
train_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/train/glacier"
train_forest_dir = "/content/drive/MyDrive/Image classification/dataset/train/forest"
train_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/train/buildings"
)
test_dir = "/content/drive/MyDrive/Image classification/dataset/test"
test_street_dir = "/content/drive/MyDrive/Image classification/dataset/test/street"
test_sea_dir = "/content/drive/MyDrive/Image classification/dataset/test/sea"
test_mountain_dir = "/content/drive/MyDrive/Image classification/dataset/test/mountain"
test_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/test/glacier"
test_forest_dir = "/content/drive/MyDrive/Image classification/dataset/test/forest"
test_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/test/buildings"
)
validation_dir = "/content/drive/MyDrive/Image classification/dataset/validation"
validation_street_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/street"
)
validation_sea_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/sea"
)
validation_mountain_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/mountain"
)
validation_glacier_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/glacier"
)
validation_forest_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/forest"
)
validation_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/buildings"
)
num_street_train = len(os.listdir(train_street_dir))
num_sea_train = len(os.listdir(train_sea_dir))
num_mountain_train = len(os.listdir(train_mountain_dir))
num_glacier_train = len(os.listdir(train_glacier_dir))
num_forest_train = len(os.listdir(train_forest_dir))
num_buildings_train = len(os.listdir(train_buildings_dir))
total_train = (
num_street_train
+ num_sea_train
+ num_mountain_train
+ num_glacier_train
+ num_forest_train
+ num_buildings_train
)
num_street_test = len(os.listdir(test_street_dir))
num_sea_test = len(os.listdir(test_sea_dir))
num_mountain_test = len(os.listdir(test_mountain_dir))
num_glacier_test = len(os.listdir(test_glacier_dir))
num_forest_test = len(os.listdir(test_forest_dir))
num_buildings_test = len(os.listdir(test_buildings_dir))
total_test = (
num_street_test
+ num_sea_test
+ num_mountain_test
+ num_glacier_test
+ num_forest_test
+ num_buildings_test
)
num_street_validation = len(os.listdir(validation_street_dir))
num_sea_validation = len(os.listdir(validation_sea_dir))
num_mountain_validation = len(os.listdir(validation_mountain_dir))
num_glacier_validation = len(os.listdir(validation_glacier_dir))
num_forest_validation = len(os.listdir(validation_forest_dir))
num_buildings_validation = len(os.listdir(validation_buildings_dir))
total_validation = (
num_street_validation
+ num_sea_validation
+ num_mountain_validation
+ num_glacier_validation
+ num_forest_validation
+ num_buildings_validation
)
# ## count
print("Number of street images in train:", num_street_train)
print("Number of sea images in train:", num_sea_train)
print("Number of mountain images in train:", num_mountain_train)
print("Number of glacier images in train:", num_glacier_train)
print("Number of forest images in train:", num_forest_train)
print("Number of buildings images in train:", num_buildings_train)
print("Total of images in train:", total_train)
print("Number of street images in test:", num_street_test)
print("Number of sea images in test:", num_sea_test)
print("Number of mountain images in test:", num_mountain_test)
print("Number of glacier images in test:", num_glacier_test)
print("Number of forest images in test:", num_forest_test)
print("Number of buildings images in test:", num_buildings_test)
print("Total of images in test:", total_test)
print("Number of street images in validation:", num_street_validation)
print("Number of sea images in validation:", num_sea_validation)
print("Number of mountain images in validation:", num_mountain_validation)
print("Number of glacier images in validation:", num_glacier_validation)
print("Number of forest images in validation:", num_forest_validation)
print("Number of buildings images in validation:", num_buildings_validation)
print("Total of images in validation:", total_validation)
# ## configuration
IMG_SHAPE = 224
batch_size = 32
# ## preprocess data (train, test, validation)
image_gen_train = ImageDataGenerator(rescale=1.0 / 255)
train_data_gen = image_gen_train.flow_from_directory(
batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
image_generator_validation = ImageDataGenerator(rescale=1.0 / 255)
val_data_gen = image_generator_validation.flow_from_directory(
batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
image_gen_test = ImageDataGenerator(rescale=1.0 / 255)
test_data_gen = image_gen_test.flow_from_directory(
batch_size=batch_size,
directory=test_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
        if logs.get("val_acc", 0) > 0.85:  # default to 0 so a missing val_acc key does not raise
print("\nReached 85% accuracy so cancelling training!")
self.model.stop_training = True
# ## download VGG-16 weights
# VGG-16 was trained on the 1000 ImageNet classes. With include_top=True we would keep the original 1000-class classifier head, whereas with include_top=False we drop that head and only need to attach and train a classifier for our own 6 classes.
pre_trained_model = tf.keras.applications.VGG16(
input_shape=(224, 224, 3), include_top=False, weights="imagenet"
)
# Now we freeze the convolutional layers of VGG-16, since they are already trained on a huge dataset and we only want to train the new classifier head.
for layer in pre_trained_model.layers:
# print(layer.name)
layer.trainable = False
last_layer = pre_trained_model.get_layer("block5_pool")
last_output = last_layer.output
x = tf.keras.layers.GlobalMaxPooling2D()(last_output)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(6, activation="softmax")(x)
# Now we connect the original VGG-16 layers with our custom classification layers into a single model.
model = tf.keras.Model(pre_trained_model.input, x)
# For multiclass classification we use the categorical_crossentropy loss.
model.compile(
optimizer="adam", loss=tf.keras.losses.categorical_crossentropy, metrics=["acc"]
)
model.summary()
# ## Train the model
print(tf.config.list_physical_devices("GPU"))
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
callbacks = myCallback()
# Model.fit accepts generators directly; fit_generator is deprecated in recent TF releases.
history = model.fit(
    train_data_gen,
    validation_data=val_data_gen,
    steps_per_epoch=(total_train // batch_size),
    epochs=100,
    validation_steps=(total_validation // batch_size),
    verbose=1,
    callbacks=[callbacks],
)
# vgg_classifier = model.fit(train_data_gen,
# steps_per_epoch=(total_train//batch_size), epochs = 60, validation_data=val_data_gen,
# validation_steps=(total_validation//batch_size),batch_size = batch_size, verbose = 1)
# ## Evaluate the model
result = model.evaluate(test_data_gen, batch_size=batch_size)
print("test_loss, test accuracy", result)
print(test_data_gen.class_indices)
true_classes = test_data_gen.classes
class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"]
class_indices = test_data_gen.class_indices
class_indices = dict((v, k) for k, v in class_indices.items())
from tensorflow.keras.preprocessing import image
# Define the number of images to plot per class
num_images = 10
# Loop over the class directories in the test directory
for class_dir in os.listdir(test_dir):
if class_dir in class_names:
# Load the first num_images images from the current class directory
images = []
filenames = os.listdir(os.path.join(test_dir, class_dir))
for i in range(num_images):
img_path = os.path.join(test_dir, class_dir, filenames[i])
img = image.load_img(img_path, target_size=(224, 224))
images.append(np.array(img))
        images = np.array(images, dtype="float32") / 255.0  # rescale to match the generators (which use rescale=1/255)
# Get the true labels for the images
true_labels = [
class_indices[test_data_gen.class_indices[class_dir]]
] * num_images
# Make predictions on the images
preds = model.predict(images)
pred_labels = [class_indices[np.argmax(pred)] for pred in preds]
# Plot the images with their predicted labels
fig, axs = plt.subplots(2, 5, figsize=(15, 7))
fig.suptitle(f"{class_dir} Predictions")
for i in range(num_images):
axs[i // 5, i % 5].imshow(images[i])
axs[i // 5, i % 5].set_title(
f"Pred: {pred_labels[i]}\nTrue: {true_labels[i]}"
)
axs[i // 5, i % 5].axis("off")
plt.show()
# ## Save the model
#
model_json = model.to_json()
with open(
"/content/drive/MyDrive/Image classification/VGG_Classifier.json", "w"
) as json_file:
json_file.write(model_json)
model.save("/content/drive/MyDrive/Image classification/VGG_Classifier.h5")
print("Saved model to disk")
model.save_weights("/content/drive/MyDrive/Image classification/VGG_weights.h5")
model = tf.keras.saving.load_model(
"/content/drive/MyDrive/Image classification/VGG_Classifier.h5",
custom_objects=None,
compile=True,
safe_mode=True,
)
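# A minimal inference sketch with the reloaded model: take the first image of
# the forest test folder, preprocess it the same way as the generators above
# (resize to IMG_SHAPE x IMG_SHAPE, rescale by 1/255) and report the argmax class.
sample_path = os.path.join(test_forest_dir, os.listdir(test_forest_dir)[0])
sample_img = Image.open(sample_path).convert("RGB").resize((IMG_SHAPE, IMG_SHAPE))
sample_batch = np.expand_dims(np.array(sample_img, dtype="float32") / 255.0, axis=0)
sample_probs = model.predict(sample_batch)[0]
print("Predicted class:", class_names[int(np.argmax(sample_probs))])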
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/903/129903554.ipynb
|
intel-image-classification
|
puneet6060
|
[{"Id": 129903554, "ScriptId": 38640554, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11821002, "CreationDate": "05/17/2023 10:20:29", "VersionNumber": 1.0, "Title": "VGG16 - Image Classification", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 233.0, "LinesInsertedFromPrevious": 233.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186317636, "KernelVersionId": 129903554, "SourceDatasetVersionId": 269359}]
|
[{"Id": 269359, "DatasetId": 111880, "DatasourceVersionId": 281586, "CreatorUserId": 2307235, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/30/2019 09:22:58", "VersionNumber": 2.0, "Title": "Intel Image Classification", "Slug": "intel-image-classification", "Subtitle": "Image Scene Classification of Multiclass", "Description": "### Context\n\nThis is image data of Natural Scenes around the world. \n\n### Content\n\nThis Data contains around 25k images of size 150x150 distributed under 6 categories.\n{'buildings' -> 0, \n'forest' -> 1,\n'glacier' -> 2,\n'mountain' -> 3,\n'sea' -> 4,\n'street' -> 5 }\n\nThe Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.\nThis data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.\n\n\n### Acknowledgements\n\nThanks to https://datahack.analyticsvidhya.com for the challenge and Intel for the Data\n\nPhoto by [Jan B\u00f6ttinger on Unsplash][1]\n\n### Inspiration\n\nWant to build powerful Neural network that can classify these images with more accuracy.\n\n\n [1]: https://unsplash.com/photos/27xFENkt-lc", "VersionNotes": "Added Prediction Images", "TotalCompressedBytes": 108365415.0, "TotalUncompressedBytes": 361713334.0}]
|
[{"Id": 111880, "CreatorUserId": 2307235, "OwnerUserId": 2307235.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 269359.0, "CurrentDatasourceVersionId": 281586.0, "ForumId": 121691, "Type": 2, "CreationDate": "01/29/2019 10:37:42", "LastActivityDate": "01/29/2019", "TotalViews": 441103, "TotalDownloads": 83887, "TotalVotes": 1345, "TotalKernels": 815}]
|
[{"Id": 2307235, "UserName": "puneet6060", "DisplayName": "Puneet Bansal", "RegisterDate": "10/01/2018", "PerformanceTier": 0}]
|
# ## Setup
from google.colab import drive
drive.mount("/content/drive")
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from tensorflow.keras.utils import img_to_array
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# ## Paths
base_dir = "/content/drive/MyDrive/Image classification/dataset"
train_dir = "/content/drive/MyDrive/Image classification/dataset/train"
train_street_dir = "/content/drive/MyDrive/Image classification/dataset/train/street"
train_sea_dir = "/content/drive/MyDrive/Image classification/dataset/train/sea"
train_mountain_dir = (
"/content/drive/MyDrive/Image classification/dataset/train/mountain"
)
train_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/train/glacier"
train_forest_dir = "/content/drive/MyDrive/Image classification/dataset/train/forest"
train_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/train/buildings"
)
test_dir = "/content/drive/MyDrive/Image classification/dataset/test"
test_street_dir = "/content/drive/MyDrive/Image classification/dataset/test/street"
test_sea_dir = "/content/drive/MyDrive/Image classification/dataset/test/sea"
test_mountain_dir = "/content/drive/MyDrive/Image classification/dataset/test/mountain"
test_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/test/glacier"
test_forest_dir = "/content/drive/MyDrive/Image classification/dataset/test/forest"
test_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/test/buildings"
)
validation_dir = "/content/drive/MyDrive/Image classification/dataset/validation"
validation_street_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/street"
)
validation_sea_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/sea"
)
validation_mountain_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/mountain"
)
validation_glacier_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/glacier"
)
validation_forest_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/forest"
)
validation_buildings_dir = (
"/content/drive/MyDrive/Image classification/dataset/validation/buildings"
)
num_street_train = len(os.listdir(train_street_dir))
num_sea_train = len(os.listdir(train_sea_dir))
num_mountain_train = len(os.listdir(train_mountain_dir))
num_glacier_train = len(os.listdir(train_glacier_dir))
num_forest_train = len(os.listdir(train_forest_dir))
num_buildings_train = len(os.listdir(train_buildings_dir))
total_train = (
num_street_train
+ num_sea_train
+ num_mountain_train
+ num_glacier_train
+ num_forest_train
+ num_buildings_train
)
num_street_test = len(os.listdir(test_street_dir))
num_sea_test = len(os.listdir(test_sea_dir))
num_mountain_test = len(os.listdir(test_mountain_dir))
num_glacier_test = len(os.listdir(test_glacier_dir))
num_forest_test = len(os.listdir(test_forest_dir))
num_buildings_test = len(os.listdir(test_buildings_dir))
total_test = (
num_street_test
+ num_sea_test
+ num_mountain_test
+ num_glacier_test
+ num_forest_test
+ num_buildings_test
)
num_street_validation = len(os.listdir(validation_street_dir))
num_sea_validation = len(os.listdir(validation_sea_dir))
num_mountain_validation = len(os.listdir(validation_mountain_dir))
num_glacier_validation = len(os.listdir(validation_glacier_dir))
num_forest_validation = len(os.listdir(validation_forest_dir))
num_buildings_validation = len(os.listdir(validation_buildings_dir))
total_validation = (
num_street_validation
+ num_sea_validation
+ num_mountain_validation
+ num_glacier_validation
+ num_forest_validation
+ num_buildings_validation
)
# ## count
print("Number of street images in train:", num_street_train)
print("Number of sea images in train:", num_sea_train)
print("Number of mountain images in train:", num_mountain_train)
print("Number of glacier images in train:", num_glacier_train)
print("Number of forest images in train:", num_forest_train)
print("Number of buildings images in train:", num_buildings_train)
print("Total of images in train:", total_train)
print("Number of street images in test:", num_street_test)
print("Number of sea images in test:", num_sea_test)
print("Number of mountain images in test:", num_mountain_test)
print("Number of glacier images in test:", num_glacier_test)
print("Number of forest images in test:", num_forest_test)
print("Number of buildings images in test:", num_buildings_test)
print("Total of images in test:", total_test)
print("Number of street images in validation:", num_street_validation)
print("Number of sea images in validation:", num_sea_validation)
print("Number of mountain images in validation:", num_mountain_validation)
print("Number of glacier images in validation:", num_glacier_validation)
print("Number of forest images in validation:", num_forest_validation)
print("Number of buildings images in validation:", num_buildings_validation)
print("Total of images in validation:", total_validation)
# ## configuration
IMG_SHAPE = 224
batch_size = 32
# ## preprocess data (train, test, validation)
image_gen_train = ImageDataGenerator(rescale=1.0 / 255)
train_data_gen = image_gen_train.flow_from_directory(
batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
image_generator_validation = ImageDataGenerator(rescale=1.0 / 255)
val_data_gen = image_generator_validation.flow_from_directory(
batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
image_gen_test = ImageDataGenerator(rescale=1.0 / 255)
test_data_gen = image_gen_test.flow_from_directory(
batch_size=batch_size,
directory=test_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode="categorical",
)
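# As an optional refinement (not part of the original pipeline), the training generator could also apply light augmentation, while validation and test keep rescaling only. A minimal sketch using the same directories and constants:
image_gen_train_aug = ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=20,  # random rotations up to 20 degrees
    width_shift_range=0.1,  # small horizontal shifts
    height_shift_range=0.1,  # small vertical shifts
    zoom_range=0.1,  # mild random zoom
    horizontal_flip=True,  # mirror images left-right
)
train_data_gen_aug = image_gen_train_aug.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_SHAPE, IMG_SHAPE),
    class_mode="categorical",
)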
class myCallback(tf.keras.callbacks.Callback):
    # Stop training once validation accuracy exceeds 85%
    def on_epoch_end(self, epoch, logs={}):
        if logs.get("val_acc", 0) > 0.85:
            print("\nReached 85% accuracy so cancelling training!")
            self.model.stop_training = True
# ## Download VGG-16 weights
# VGG-16 is trained on ImageNet with many classes. With include_top=True we would keep its original classifier head and stay tied to those classes; with include_top=False we drop the head and only add (and train) the layers for our own classes.
pre_trained_model = tf.keras.applications.VGG16(
input_shape=(224, 224, 3), include_top=False, weights="imagenet"
)
# Now we freeze the VGG-16 layers: the network is already trained on a huge dataset, so only our custom head needs to be trained.
for layer in pre_trained_model.layers:
# print(layer.name)
layer.trainable = False
last_layer = pre_trained_model.get_layer("block5_pool")
last_output = last_layer.output
x = tf.keras.layers.GlobalMaxPooling2D()(last_output)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(6, activation="softmax")(x)
# Now we connect the original VGG-16 layers with our custom head into a single model.
model = tf.keras.Model(pre_trained_model.input, x)
# For multiclass classification we use categorical_crossentropy as the loss, matching the generators' class_mode="categorical".
model.compile(
optimizer="adam", loss=tf.keras.losses.categorical_crossentropy, metrics=["acc"]
)
model.summary()
# ## Train the model
print(tf.config.list_physical_devices("GPU"))
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
callbacks = myCallback()
# fit_generator is deprecated in TF 2.x; model.fit accepts generators directly
history = model.fit(
    train_data_gen,
    validation_data=val_data_gen,
    steps_per_epoch=(total_train // batch_size),
    epochs=100,
    validation_steps=(total_validation // batch_size),
    verbose=1,
    callbacks=[callbacks],
)
# vgg_classifier = model.fit(train_data_gen,
# steps_per_epoch=(total_train//batch_size), epochs = 60, validation_data=val_data_gen,
# validation_steps=(total_validation//batch_size),batch_size = batch_size, verbose = 1)
# ## Evaluate the model
result = model.evaluate(test_data_gen)  # the generator already defines the batch size
print("test_loss, test accuracy", result)
print(test_data_gen.class_indices)
true_classes = test_data_gen.classes
class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"]
class_indices = test_data_gen.class_indices
class_indices = dict((v, k) for k, v in class_indices.items())
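# A quick quantitative check (a sketch, assuming scikit-learn is available; not part of the original run). Predictions only line up with the generator's .classes attribute when shuffling is off, so a fresh non-shuffled test generator is built here.
from sklearn.metrics import classification_report

eval_gen = image_gen_test.flow_from_directory(
    directory=test_dir,
    target_size=(IMG_SHAPE, IMG_SHAPE),
    batch_size=batch_size,
    class_mode="categorical",
    shuffle=False,  # keep file order so labels and predictions align
)
pred_probs = model.predict(eval_gen)
pred_classes = np.argmax(pred_probs, axis=1)
print(classification_report(eval_gen.classes, pred_classes, target_names=class_names))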
from tensorflow.keras.preprocessing import image
# Define the number of images to plot per class
num_images = 10
# Loop over the class directories in the test directory
for class_dir in os.listdir(test_dir):
if class_dir in class_names:
# Load the first num_images images from the current class directory
images = []
filenames = os.listdir(os.path.join(test_dir, class_dir))
for i in range(num_images):
img_path = os.path.join(test_dir, class_dir, filenames[i])
img = image.load_img(img_path, target_size=(224, 224))
images.append(np.array(img))
images = np.array(images)
# Get the true labels for the images
true_labels = [
class_indices[test_data_gen.class_indices[class_dir]]
] * num_images
# Make predictions on the images
        preds = model.predict(images / 255.0)  # rescale to [0, 1] to match the training generators
pred_labels = [class_indices[np.argmax(pred)] for pred in preds]
# Plot the images with their predicted labels
fig, axs = plt.subplots(2, 5, figsize=(15, 7))
fig.suptitle(f"{class_dir} Predictions")
for i in range(num_images):
axs[i // 5, i % 5].imshow(images[i])
axs[i // 5, i % 5].set_title(
f"Pred: {pred_labels[i]}\nTrue: {true_labels[i]}"
)
axs[i // 5, i % 5].axis("off")
plt.show()
# ## Save the model
#
model_json = model.to_json()
with open(
"/content/drive/MyDrive/Image classification/VGG_Classifier.json", "w"
) as json_file:
json_file.write(model_json)
model.save("/content/drive/MyDrive/Image classification/VGG_Classifier.h5")
print("Saved model to disk")
model.save_weights("/content/drive/MyDrive/Image classification/VGG_weights.h5")
model = tf.keras.saving.load_model(
"/content/drive/MyDrive/Image classification/VGG_Classifier.h5",
custom_objects=None,
compile=True,
safe_mode=True,
)
| false | 0 | 3,218 | 1 | 3,419 | 3,218 |
||
129903890
|
# Amazon Reviews Sentiment - VADER & RoBERTa
# Preface
# In the bustling world of online shopping, customer reviews have become a powerful voice that shapes our purchasing decisions. Each day, millions of people flock to Amazon, the e-commerce giant, to explore an endless array of products and discover what others have to say about them. It is in this vast ocean of reviews that our project finds its purpose – to uncover the overall sentiment of Amazon customers through the analysis of their invaluable feedback.
# With the goal of delving into the minds of consumers, we embarked on a journey through a massive dataset containing a plethora of Amazon reviews. Our mission was to extract insights, uncover patterns, and decipher the underlying sentiment that reverberates within these candid testimonials. We knew that behind each review lay a story, a personal experience, and an opinion waiting to be heard.
# As we ventured into this project, we understood that sentiment analysis would be the key to unlocking the collective sentiment hidden within the sea of reviews. Armed with the power of Natural Language Processing and machine learning, we set out to analyze the text, decode emotions, and reveal the sentiment that influenced the customer's overall perception of the products they encountered.
# Throughout our exploration, we encountered both challenges and triumphs. We meticulously examined the dataset, taking into account various factors such as ratings, review length, and the inherent positivity or negativity conveyed through the customers' words. We employed advanced techniques, leveraging state-of-the-art models like Roberta and Vader, to discern the sentiment expressed in each review. Our quest was to paint a comprehensive picture of the sentiments prevailing among the Amazon customer community.
# Through the rich tapestry of reviews, we uncovered fascinating insights. We witnessed the sheer diversity of sentiments, ranging from exuberant praise to scathing criticism. We noticed the varying degrees of positivity, negativity, and neutrality that shaped the overall sentiment of the customers. We marveled at the power of language and its ability to influence perception and purchase decisions.
# This project is an exploration, an ode to the voice of the customers who have left their mark on the digital landscape. It is a testament to the immense value of their opinions and the role they play in shaping the modern consumer landscape. As we present our findings and delve into the world of sentiments, I invite you to join me on this captivating journey through the realm of Amazon reviews. Together, let us unravel the sentiment that lies within the words and experiences of countless customers, and gain a deeper understanding of the sentiments that underpin the Amazon shopping experience.
# Libraries
# basics
import pandas as pd
import numpy as np
# visualisation
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
# regular expression
import re
import string
# NLP toolkit
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# VADER
from nltk.sentiment import SentimentIntensityAnalyzer
# RoBERTa
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from scipy.special import softmax
# other
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
# Loading Datasets
data1 = pd.read_csv("/kaggle/input/amazon-reviews/data1.csv")
data2 = pd.read_csv("/kaggle/input/amazon-reviews/data2.csv")
data3 = pd.read_csv("/kaggle/input/amazon-reviews/data3.csv")
data1.head(2)
data2.head(2)
data3.head(2)
# Basic Checks
print("SHAPES")
print("data1", data1.shape)
print("======================")
print("data2", data2.shape)
print("======================")
print("data3", data3.shape)
# In this project, the aim is to find the sentiment of customers based on the reviews they provided. No other form of analysis of the given data is included here.
# So we will keep only the relevant features from all three datasets and combine them into a single dataset.
print("COLUMNS:")
for data in [data1, data2, data3]:
print(data.columns)
print("======================================")
# extracting common columns:
set(data1.columns).intersection(set(data2.columns)).intersection(set(data3.columns))
# To fulfill the scope of our project, we just need the following columns:
# - id
# - reviews.date
# - reviews.rating
# - reviews.text
# - reviews.title
reviews = pd.DataFrame()
for data in tqdm([data1, data2, data3]):
df = data[["id", "reviews.date", "reviews.rating", "reviews.text", "reviews.title"]]
reviews = pd.concat([reviews, df], ignore_index=True)
reviews.columns = ["id", "date", "rating", "review", "title"]
reviews.head(3)
reviews.info()
# - There are a total of 34,929 product reviews
# - Rating is the only numeric column
# - There are missing values in the dataset
reviews.describe()
# - The average rating is approximately 4.52, which suggests that, on average, customers are generally positive about the products being reviewed.
# - The standard deviation of 0.912454 indicates a moderate degree of variation in the ratings. This means that there is some diversity in customer opinions, with some ratings deviating from the mean.
# - The minimum rating is 1, while the maximum rating is 5. This indicates that the dataset includes the full range of possible ratings, allowing for a comprehensive analysis of sentiment.
# - Overall, these observations indicate that the dataset predominantly consists of positive reviews, as shown by the high mean and median ratings. However, the spread and range of the ratings suggest there is still some variation in customer sentiment, allowing for a more nuanced analysis of the reviews.
reviews.isna().mean()
# We would prefer to keep as many reviews as possible so that the analysis is well grounded. The review column has no missing values, but 30 titles are missing. Additionally, around 10% of the values are missing from the date column, and about 12% of reviews have no rating.
# Dropping rows or columns would lose data, yet for some analyses missing values cause trouble.
# A simple solution: create a replica of the dataset and drop the rows with missing ratings from it.
no_missing_reviews = reviews.copy()
no_missing_reviews.dropna(subset=["rating"], inplace=True)
# Thus, the missing ratings are handled while the original dataset stays intact.
# Exploratory Data Analysis
plt.hist(x="rating", data=no_missing_reviews)
# Since the ratings are discrete values, the histogram effectively acts as a count plot.
# - The majority of the reviews (24,116) have a rating of 5. This indicates a strong positive sentiment among customers, as the highest rating is the most prevalent in the dataset. This sounds like good news for the company.
# - When considering ratings of 4 and 5 combined, there are a total of 31,208 reviews. This indicates that approximately 90% of the reviews in the dataset are positive, as they fall into the higher rating range.
# - Combining ratings of 1, 2, and 3, there are a total of 3,301 reviews that fall into this category. Analyzing these reviews can provide insights into potential areas of improvement or specific issues that customers encountered.
# These observations highlight the overall positive sentiment among customers, but also indicate the presence of moderate and negative reviews. Analyzing the content and sentiments of these reviews can provide valuable insights for improving products, addressing customer concerns, and enhancing the overall customer experience.
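# For reference, the same distribution can be drawn directly as a count plot (a sketch, not part of the original notebook):
sns.countplot(x="rating", data=no_missing_reviews)
plt.title("Rating counts")
plt.show()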
no_missing_reviews.head(3)
# number of characters, words and sentences in each review
no_missing_reviews["characters"] = no_missing_reviews["review"].apply(len)
no_missing_reviews["words"] = no_missing_reviews["review"].apply(
lambda x: len(nltk.word_tokenize(x))
)
no_missing_reviews["sentences"] = no_missing_reviews["review"].apply(
lambda x: len(nltk.sent_tokenize(x))
)
no_missing_reviews.head(3)
no_missing_reviews[["characters", "words", "sentences"]].describe()
# **Maximum Lengths:** The maximum values for the 'characters', 'words', and 'sentences' features are quite high, with the longest review having 19,739 characters, 4,017 words, and 125 sentences. These extremely long reviews could potentially contain detailed and extensive feedback or comments.
# **Minimum Lengths:** The minimum values for all three features are 1, indicating the presence of extremely short reviews. These reviews might be very concise or could be outliers, possibly lacking substantial information or meaningful content.
# **Average Lengths:** The mean values reveal the average length of reviews. On average, the reviews contain approximately 176 characters, 37 words, and 2.65 sentences. These averages can serve as baselines for understanding the typical length of reviews in the dataset.
# **High Standard Deviations:** The standard deviations for the 'characters' and 'words' features are relatively high, indicating significant variation in the length of reviews. This suggests a wide range of review lengths, implying that some reviews are much longer or shorter than the average.
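# To sanity-check these extremes, one could peek at the longest review directly (a sketch):
longest_idx = no_missing_reviews["characters"].idxmax()
print("rating:", no_missing_reviews.loc[longest_idx, "rating"])
print(no_missing_reviews.loc[longest_idx, "review"][:500], "...")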
# review with one character
no_missing_reviews[no_missing_reviews["characters"] == 1]
# Single-character reviews do not convey any meaning, so they will not contribute much to our models. Let's drop these rows.
no_missing_reviews.drop(
index=no_missing_reviews[no_missing_reviews["characters"] == 1].index, inplace=True
)
no_missing_reviews[no_missing_reviews["characters"] == 2].head()
# We can still see some non-alphabetic characters in the reviews. We will deal with them during data cleaning.
# reviews with one word
no_missing_reviews[no_missing_reviews["words"] == 1].head()
# We want to know what differentiates the text of high-rating reviews from that of lower-rating ones. We will create two categories: a 'high' group for ratings greater than 3 and a 'low' group for ratings less than or equal to 3.
no_missing_reviews["rating_type"] = no_missing_reviews["rating"].apply(
lambda x: "high" if x > 3 else "low"
)
# average characters, words, sentences count based on rating type
data1 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["characters"].mean())
data2 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["words"].mean())
data3 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["sentences"].mean())
data = pd.concat([data1, data2, data3], axis=1)
data
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
sns.barplot(x=data.index, y="characters", data=data, ax=ax[0])
sns.barplot(x=data.index, y="words", data=data, ax=ax[1])
sns.barplot(x=data.index, y="sentences", data=data, ax=ax[2])
ax[0].set_title("character count")
ax[1].set_title("word count")
ax[2].set_title("sentence count")
plt.tight_layout()
sns.pairplot(no_missing_reviews, hue="rating_type")
corr = no_missing_reviews.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
sns.heatmap(corr, mask=mask, annot=True, cmap="flare")
# Data Processing
# Data preprocessing is a crucial step in natural language processing (NLP) tasks, including sentiment analysis. It involves transforming raw text data into a format that is suitable for analysis and modeling. By applying various preprocessing techniques, we can clean and standardize the textual data, reducing noise and irrelevant information.
# In the context of Amazon review sentiment analysis, data preprocessing plays a vital role in improving the accuracy and effectiveness of the analysis. The steps below are common techniques used in text preprocessing to enhance the quality of the data and extract meaningful features. Let's discuss each step in more detail:
# **Remove punctuation:** Punctuation marks like commas, periods, or exclamation marks do not carry significant sentiment information and can be safely removed. This step helps in reducing the dimensionality of the data and simplifying the subsequent analysis.
# **Lowercasing:** Converting all the text to lowercase ensures that the analysis treats words with the same spelling but different cases as the same. It helps in avoiding redundant duplicate features and improves the accuracy of downstream tasks such as sentiment analysis.
# **Remove stop words:** Stop words are commonly used words that do not contribute much to the overall sentiment or meaning of the text, such as "a," "an," or "the." Removing these words helps to reduce noise and focus on the more important content-bearing words.
# **Remove emojis:** Emojis are graphical representations used to express emotions. In sentiment analysis, they can add noise to the text and may not carry direct semantic meaning. Removing emojis helps to simplify the text and improve the interpretability of the sentiment analysis results.
# **Tokenization:** Tokenization is the process of breaking down a text into individual tokens, such as words or subwords. It helps in preparing the text for further analysis and feature extraction.
# **Stemming:** Stemming involves reducing words to their root or base form. For example, converting "running," "runs," and "ran" to the common stem "run." This step helps to normalize the words and reduce the dimensionality of the feature space.
# By performing these preprocessing steps, we can clean the Amazon review data, standardize it, and remove noise and irrelevant information. This will provide a cleaner and more representative dataset for sentiment analysis, allowing for more accurate and insightful results.
# instantiating PorterStemmer object
ps = PorterStemmer()
def transform_text(text):
# lower casing
text = text.lower()
# removing html tags
pattern = re.compile("<.*?>")
text = pattern.sub(r"", text)
# removing urls
pattern_url = re.compile(r"https?://\S+|www\.\S+")
text = pattern_url.sub(r"", text)
# removing punctuations
for (
char
) in string.punctuation: ###punctuation_marks '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
text = text.replace(char, "")
# tokenization
text = nltk.word_tokenize(text)
# removing stop words
new_text = []
for word in text:
if word not in stopwords.words("english"):
new_text.append(word)
# stemming
new_text_stem = []
for word in new_text:
word = ps.stem(word)
new_text_stem.append(word)
return " ".join(new_text_stem)
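# Note: the preprocessing steps above mention emoji removal, but transform_text does not implement it. A minimal sketch of how it could be added (the Unicode ranges are an assumption covering common emoji blocks):
def remove_emojis(text):
    # strip characters from common emoji / pictograph Unicode blocks
    emoji_pattern = re.compile(
        "["
        "\U0001F300-\U0001FAFF"  # symbols, pictographs, emoticons, extended pictographs
        "\U00002700-\U000027BF"  # dingbats
        "\U0001F1E6-\U0001F1FF"  # regional indicator (flag) symbols
        "]+",
        flags=re.UNICODE,
    )
    return emoji_pattern.sub("", text)
# e.g. call remove_emojis(text) inside transform_text before tokenization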
reviews["transformed_review"] = reviews["review"].apply(transform_text)
reviews.head(3)
# sample review
example = reviews["transformed_review"][np.random.randint(len(reviews))]
example
ratings = []
for i, row in reviews.iterrows():
if i in no_missing_reviews.index:
        # avoid shadowing the built-in name "type"
        r_type = no_missing_reviews.loc[i, "rating_type"]
        ratings.append(r_type)
else:
ratings.append("NA")
reviews["rating_type"] = ratings
reviews.head(3)
l = reviews[reviews["rating_type"] == "high"]["transformed_review"].tolist()
word_corpus_high = []
for sent in l:
for word in sent.split():
word_corpus_high.append(word)
positives = pd.DataFrame(Counter(word_corpus_high).most_common(20))
plt.figure(figsize=(10, 6))
sns.barplot(x=positives[0], y=positives[1])
plt.xticks(rotation=90)
plt.xlabel("words")
plt.ylabel("frequency")
plt.title("Most frequent words in Positive Reviews")
# Words such as 'great', 'love', 'like' appear the most in positive reviews
l = reviews[reviews["rating_type"] == "low"]["transformed_review"].tolist()
word_corpus_low = []
for sent in l:
for word in sent.split():
word_corpus_low.append(word)
negatives = pd.DataFrame(Counter(word_corpus_low).most_common(20))
plt.figure(figsize=(10, 6))
sns.barplot(x=negatives[0], y=negatives[1])
plt.xticks(rotation=90)
plt.xlabel("words")
plt.ylabel("frequency")
plt.title("Most frequent words in Negative Reviews")
# Negative reviews focus heavily on the word 'battery'. This suggests that battery products are an area Amazon needs to look at, as they attracted most of the negative reviews.
# VADER (Valence Aware Dictionary and sEntiment Reasoner)
# VADER (Valence Aware Dictionary and sEntiment Reasoner) is a lexicon and rule-based sentiment analysis tool specifically designed for analyzing sentiments expressed in social media text. It is built on a pre-existing list of lexical features that have been labeled with their corresponding sentiment intensities. The VADER sentiment score calculates the sentiment polarity (positive, negative, or neutral) and the sentiment intensity (how strong the sentiment is) of a given text.
# **Advantages of VADER Sentiment Score:**
# - Built for Social Media Text: VADER is particularly effective for analyzing sentiments in social media text, such as tweets or online reviews. It handles the informal language, slang, and emoticons commonly used on these platforms.
# - Rule-Based Approach: VADER utilizes a rule-based approach, which makes it more interpretable compared to machine learning-based models. The rules are designed to capture linguistic nuances and sentiment intensity.
# - Domain Adaptability: VADER is not limited to specific domains or topics. It can be applied to a wide range of domains and does not require domain-specific training data.
# - Handles Negation and Capitalization: VADER is capable of understanding the impact of negations and capitalization on sentiment. It considers the context in which words appear, allowing it to handle phrases like "not good" correctly.
# **Disadvantages of VADER Sentiment Score:**
# - Lexicon Limitations: The sentiment analysis accuracy of VADER heavily relies on the lexicon it is built upon. While VADER's lexicon is extensive, it may not capture all possible variations or new emerging words, leading to potential inaccuracies.
# - Contextual Ambiguity: VADER's rule-based approach might struggle with sentences that contain sarcasm, irony, or other forms of ambiguous contexts. These cases may require a deeper understanding of the context to accurately determine sentiment.
# - Lack of Granularity: VADER provides sentiment scores as positive, negative, or neutral, but it does not offer fine-grained sentiment labels. It may not distinguish between subtle nuances of sentiment or provide detailed sentiment analysis.
# **Helpfulness in Sentiment Analysis:**
# VADER's strengths lie in its ability to handle a wide variety of text, adapt to different domains, and consider contextual factors like negations. Its rule-based approach provides transparency and interpretability. Due to these advantages, VADER is a valuable tool for sentiment analysis tasks in social media monitoring, brand reputation analysis, customer feedback analysis, and other applications where quick sentiment insights are required. However, for more nuanced and complex sentiment analysis tasks, machine learning-based approaches may be more appropriate.
# instantiating SentimentIntensityAnalyzer object
sia = SentimentIntensityAnalyzer()
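# A quick look at what the analyzer returns for a toy sentence (a sketch; the exact numbers depend on the VADER lexicon version):
print(sia.polarity_scores("This tablet is great, the battery life is amazing!"))
# returns a dict with 'neg', 'neu', 'pos' and 'compound' keys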
reviews = reviews.reset_index()
sentiment = {}
for index, row in tqdm(reviews.iterrows(), total=len(reviews)):
text = row["transformed_review"]
id = row["index"]
sentiment[id] = sia.polarity_scores(text)
vader_scores = pd.DataFrame(sentiment).T.reset_index()
vader = reviews.merge(vader_scores, on="index")
vader.head()
sns.barplot(x="rating_type", y="compound", data=vader)
fig, ax = plt.subplots(1, 4, figsize=(20, 5))
sns.barplot(x="rating", y="pos", data=vader, ax=ax[0])
sns.barplot(x="rating", y="neg", data=vader, ax=ax[1])
sns.barplot(x="rating", y="neu", data=vader, ax=ax[2])
sns.barplot(x="rating", y="compound", data=vader, ax=ax[3])
ax[0].set_title("Rating vs Positive Score")
ax[1].set_title("Rating vs Negative Score")
ax[2].set_title("Rating vs Neutral Score")
ax[3].set_title("Rating vs Compound Score")
# RoBERTa (Robustly Optimized BERT approach)
# RoBERTa (Robustly Optimized BERT approach) is a transformer-based language model that has been pretrained on a large corpus of unlabeled text data. It is similar to the VADER (Valence Aware Dictionary and sEntiment Reasoner) sentiment analysis model in the sense that both are powerful tools used in sentiment analysis tasks.
# RoBERTa, based on the BERT (Bidirectional Encoder Representations from Transformers) architecture, excels in understanding the contextual meaning of words and sentences. It captures the relationships and dependencies among words, enabling it to generate more accurate sentiment predictions.
# **Advantages of RoBERTa Pretrained Model:**
# - Contextual Understanding: RoBERTa has been pretrained on a massive amount of text data, which helps it grasp the nuances of language and context. This contextual understanding enables more accurate sentiment analysis by considering the surrounding words and their meanings.
# - Fine-tuning Capabilities: The RoBERTa model can be fine-tuned on specific sentiment analysis tasks using labeled data. This allows it to adapt and specialize its predictions for the particular sentiment classification problem at hand.
# - Language-Agnostic: RoBERTa is designed to work effectively across multiple languages, making it suitable for sentiment analysis tasks in diverse linguistic contexts.
# - State-of-the-Art Performance: RoBERTa has achieved state-of-the-art performance on various natural language processing (NLP) benchmarks and competitions. Its advanced architecture and training methodology contribute to its impressive accuracy and robustness.
# **Disadvantages of RoBERTa Pretrained Model:**
# - Computational Resources: Training and fine-tuning RoBERTa models can be computationally intensive and may require substantial computational resources, including high-performance GPUs or TPUs.
# - Data Dependency: RoBERTa heavily relies on large amounts of labeled data for fine-tuning, and the quality and representativeness of the training data can significantly impact its performance.
# - Interpretability: Transformer-based models like RoBERTa are known to be "black-box" models, meaning they provide accurate predictions but lack interpretability. Understanding the specific reasons behind the sentiment predictions made by RoBERTa can be challenging.
# RoBERTa, with its strong contextual understanding and ability to capture intricate linguistic patterns, proves to be a valuable tool for sentiment analysis tasks. Its performance, combined with the ability to fine-tune for specific applications, makes it a popular choice in the field of NLP and sentiment analysis.
# Tasks:
# emoji, emotion, hate, irony, offensive, sentiment
task = "sentiment"
MODEL = f"cardiffnlp/twitter-roberta-base-{task}"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
def roberta_scores(example):
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
return scores_dict
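# A quick check of the helper on a toy sentence (a sketch; the exact scores will vary with the model version):
print(roberta_scores("The battery died after two days, very disappointing."))
# returns a dict with 'roberta_neg', 'roberta_neu', 'roberta_pos' probabilities summing to ~1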
sentiments_roberta = {}
for ind, row in tqdm(reviews.iterrows(), total=len(reviews)):
try:
text = row["transformed_review"]
id = row["index"]
roberta_result = roberta_scores(text)
sentiments_roberta[id] = roberta_result
except RuntimeError:
        print("Error in row with index {}".format(id))
roberta = pd.DataFrame(sentiments_roberta).T.reset_index()
roberta.head()
roberta = reviews.merge(roberta, on="index")
sentiment_df = roberta.merge(
vader[["index", "neg", "neu", "pos", "compound"]], on="index"
)
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
sns.barplot(x="rating", y="roberta_pos", data=sentiment_df, ax=ax[0])
sns.barplot(x="rating", y="roberta_neg", data=sentiment_df, ax=ax[1])
sns.barplot(x="rating", y="roberta_neu", data=sentiment_df, ax=ax[2])
ax[0].set_title("Rating vs Roberta Positive Score")
ax[1].set_title("Rating vs Roberta Negative Score")
ax[2].set_title("Rating vs Roberta Neutral Score")
# Comparing RoBERTa and VADER
def get_sentiment_scores(review):
# roberta
encoded_text = tokenizer(review, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_roberta = {"neg": scores[0], "neu": scores[1], "pos": scores[2]}
scores_vader = sia.polarity_scores(review)
del scores_vader["compound"]
combined = {}
combined["roberta"] = scores_roberta
combined["vader"] = scores_vader
df = pd.DataFrame(combined)
# Set the width of each bar
bar_width = 0.25
# Calculate the x-coordinates for each dataset
x_roberta = np.arange(len(df))
x_vader = x_roberta + bar_width
# Create the figure and axis
fig, ax = plt.subplots()
# Plot the bars for each dataset
ax.bar(x_roberta, df["roberta"], width=bar_width, label="Roberta")
ax.bar(x_vader, df["vader"], width=bar_width, label="Vader")
# Set the x-axis tick labels
ax.set_xticks(x_roberta + bar_width / 2)
ax.set_xticklabels(df.index)
ax.set_title(review)
# Add a legend
ax.legend()
return fig
# another sample review
example = reviews["transformed_review"][np.random.randint(len(reviews))]
example
get_sentiment_scores(example)
# Based on analysis of a large number of reviews, several differences can be observed. Here is a summary of the comparison:
# **Sentiment Differentiation:** The Roberta model shows a better ability to differentiate between positive, negative, and neutral sentiments compared to the Vader model. The Roberta model assigns higher scores to the positive and negative categories, while the Vader model assigns higher scores to the neutral category.
# **Valuing Neutrality:** The Vader model tends to value neutrality more for most of the reviews. This is evident from the higher neutral score assigned by the Vader model compared to the Roberta model for the given review.
# **Sensitivity to Negativity:** The Roberta model appears to be more sensitive to negative sentiment compared to the Vader model. This is indicated by the higher negative score assigned by the Roberta model for the given review.
# **Overall Sentiment Polarity:** The Roberta model assigns a higher positive score and a lower neutral score compared to the Vader model for the given review. This suggests that the Roberta model perceives the review as more positive overall, while the Vader model perceives it as more neutral.
# Overall, the comparison highlights that the Roberta model tends to provide more nuanced sentiment analysis by differentiating between positive, negative, and neutral sentiments, while the Vader model leans towards valuing neutrality and may be less sensitive to negativity.
# ## Discrepancies
# ### Case 1: User gave a low rating but the model identifies positive sentiment
odd_ones = sentiment_df[sentiment_df["rating"] == 1].sort_values(
"roberta_pos", ascending=False
)
odd_ones.head(3)
index = 0
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# The review reads positive, but the user gave it a low rating. Roberta manages to pick up the correct sentiment, while Vader calls the review mostly neutral, which is wrong.
index = 1
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This review leans to the negative side, yet both Roberta and Vader call it neutral or positive. This could be due to the absence of explicit negative words and the presence of words such as 'working', 'good', 'still'.
index = 2
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This one looks fine: the user wrote a positive review but gave a rating of 1. The review conveys a strong positive sentiment, which Roberta captures correctly, while Vader again seems torn between neutral and positive.
# ### Case 2: User gave a high rating but the model identifies negative sentiment
odd_ones1 = sentiment_df[sentiment_df["rating"] == 5].sort_values(
"roberta_neg", ascending=False
)
odd_ones1.head(3)
index = 0
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The review carries a strong positive sentiment. Both models seem to miss it because the word 'wrong' is used in a positive context; this is an instance where even Roberta fails to capture wordplay. As usual, Vader sticks with a neutral verdict.
index = 1
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The user meant that the product is great for the Xbox, which is a very positive sentiment. However, the Roberta model seems to have been influenced by the word 'sucking', which was used to describe the Xbox controller.
index = 2
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# In this instance, the Vader model delivered the correct output. The review sounds like the user does not care much about performance, yet Roberta classifies it as negative, possibly because of words such as 'never' and 'don't'.
# With all of these examples, we can say that, while the Roberta model offers improved sentiment analysis capabilities compared to the Vader model, it is essential to acknowledge its limitations. Here are some limitations of the Roberta model:
# **Language Dependency:** The Roberta model's performance can vary depending on the language it is trained on. If the model is trained on a specific language, it may not generalize well to other languages. Therefore, its effectiveness in sentiment analysis tasks may be limited to the languages it was trained on.
# **Data Bias:** The performance of the Roberta model is influenced by the training data it receives. If the training data contains biases, such as underrepresentation or overrepresentation of certain demographics or perspectives, the model may inherit those biases. This can result in biased sentiment analysis outputs, affecting the reliability and fairness of the model.
# **Contextual Understanding:** While the Roberta model has a strong understanding of contextual language, it may still struggle with certain nuances, sarcasm, or context-specific sentiments. These limitations stem from the complexities of language comprehension, and the model may not always capture the intended sentiment accurately in such cases.
# **Interpretability:** Like many deep learning models, the Roberta model's internal workings and decision-making process can be challenging to interpret. It can be challenging to understand how the model arrives at its sentiment analysis outputs, making it difficult to explain its predictions or identify potential biases or errors.
# While it offers advancements over traditional models like Vader, it is crucial to assess RoBERTa's performance within specific contexts, validate its outputs, and consider potential biases and limitations when interpreting the results.
# Overall Sentiment Analysis of the Dataset
# To calculate the overall sentiment of a dataset based on the Roberta scores, we will:
# - Take the average of the sentiment scores for each sentiment category (negative, neutral, positive) across all the data points.
# - Determine the sentiment with the highest average value as the overall sentiment of the dataset.
roberta.head(2)
new_rob = roberta[["roberta_neg", "roberta_neu", "roberta_pos"]]
# Averaging the sentiment scores per category and picking the largest
overall_sentiment = new_rob.mean(axis=0).idxmax()
print("Overall Sentiment:", overall_sentiment)
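# A complementary view (a sketch, not in the original notebook): label each review with its argmax Roberta sentiment and look at the distribution, rather than only the dataset-level average.
per_review_sentiment = new_rob.idxmax(axis=1)  # dominant Roberta sentiment per review
print(per_review_sentiment.value_counts(normalize=True).round(3))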
def roberta_scores(example):
encoded_text = tokenizer(example, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_dict = {
"roberta_neg": scores[0],
"roberta_neu": scores[1],
"roberta_pos": scores[2],
}
return scores_dict
sentiments_roberta = {}
for ind, row in tqdm(reviews.iterrows(), total=len(reviews)):
try:
text = row["transformed_review"]
id = row["index"]
roberta_result = roberta_scores(text)
sentiments_roberta[id] = roberta_result
except RuntimeError:
        print("Error in row with index {}".format(id))
roberta = pd.DataFrame(sentiments_roberta).T.reset_index()
roberta.head()
roberta = reviews.merge(roberta, on="index")
sentiment_df = roberta.merge(
vader[["index", "neg", "neu", "pos", "compound"]], on="index"
)
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
sns.barplot(x="rating", y="roberta_pos", data=sentiment_df, ax=ax[0])
sns.barplot(x="rating", y="roberta_neg", data=sentiment_df, ax=ax[1])
sns.barplot(x="rating", y="roberta_neu", data=sentiment_df, ax=ax[2])
ax[0].set_title("Rating vs Roberta Positive Score")
ax[1].set_title("Rating vs Roberta Negative Score")
ax[2].set_title("Rating vs Roberta Neutral Score")
# Comparing RoBERTa and VADER
def get_sentiment_scores(review):
# roberta
encoded_text = tokenizer(review, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
scores_roberta = {"neg": scores[0], "neu": scores[1], "pos": scores[2]}
scores_vader = sia.polarity_scores(review)
del scores_vader["compound"]
combined = {}
combined["roberta"] = scores_roberta
combined["vader"] = scores_vader
df = pd.DataFrame(combined)
# Set the width of each bar
bar_width = 0.25
# Calculate the x-coordinates for each dataset
x_roberta = np.arange(len(df))
x_vader = x_roberta + bar_width
# Create the figure and axis
fig, ax = plt.subplots()
# Plot the bars for each dataset
ax.bar(x_roberta, df["roberta"], width=bar_width, label="Roberta")
ax.bar(x_vader, df["vader"], width=bar_width, label="Vader")
# Set the x-axis tick labels
ax.set_xticks(x_roberta + bar_width / 2)
ax.set_xticklabels(df.index)
ax.set_title(review)
# Add a legend
ax.legend()
return fig
# another sample review
example = reviews["transformed_review"][np.random.randint(len(reviews))]
example
get_sentiment_scores(example)
# Based on the analysis of a large number of reviews, several differences can be observed. Here is a summary of the comparison:
# **Sentiment Differentiation:** The Roberta model shows a better ability to differentiate between positive, negative, and neutral sentiments compared to the Vader model. The Roberta model assigns higher scores to the positive and negative categories, while the Vader model assigns higher scores to the neutral category.
# **Valuing Neutrality:** The Vader model tends to value neutrality more for most of the reviews. This is evident from the higher neutral score assigned by the Vader model compared to the Roberta model for the given review.
# **Sensitivity to Negativity:** The Roberta model appears to be more sensitive to negative sentiment compared to the Vader model. This is indicated by the higher negative score assigned by the Roberta model for the given review.
# **Overall Sentiment Polarity:** The Roberta model assigns a higher positive score and a lower neutral score compared to the Vader model for the given review. This suggests that the Roberta model perceives the review as more positive overall, while the Vader model perceives it as more neutral.
# Overall, the comparison highlights that the Roberta model tends to provide more nuanced sentiment analysis by differentiating between positive, negative, and neutral sentiments, while the Vader model leans towards valuing neutrality and may be less sensitive to negativity.
# ## Discrepancies
# ### Case1: User gave low rating but Model identify Positive sentiment
odd_ones = sentiment_df[sentiment_df["rating"] == 1].sort_values(
"roberta_pos", ascending=False
)
odd_ones.head(3)
index = 0
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# The review seems positive, but the user gave it a low rating. Roberta manages to get the correct sentiment from the review. Notice that Vader says the review is more neutral, which is wrong.
index = 1
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This review tends more to the negative side, but both Roberta and Vader say it is either neutral or positive. It could be because of the absence of explicit negative words and the presence of words such as 'working', 'good', 'still'.
index = 2
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This looks fine. The user wrote a positive review but gave a 1 rating. The review conveys a strong positive sentiment, which is correctly captured by Roberta. Vader again seems confused between neutral and positive sentiment.
# ### Case2: User gave high rating but Model identify negative sentiment
odd_ones1 = sentiment_df[sentiment_df["rating"] == 5].sort_values(
"roberta_neg", ascending=False
)
odd_ones1.head(3)
index = 0
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The review has a strong positive sentiment. It seems that both models fail to detect it because the word 'wrong' is used in a positive context. This is an example of instances where the Roberta model is unable to capture the context when there is a play on words. As usual, Vader sticks with a neutral sentiment.
index = 1
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The user wanted to say that the product is great for the Xbox, which is a very positive sentiment. However, the Roberta model seems to have been influenced by the word 'sucking', which was used to describe the Xbox controller.
index = 2
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# In this instance, the Vader model delivered the correct output. The review sounds like the user does not care much about performance. The Roberta model classifies it as negative sentiment, which could be because of words such as 'never' and 'don't'.
# With all of these example, we can say that, while the Roberta model offers improved sentiment analysis capabilities compared to the Vader model, it is essential to acknowledge its limitations. Here are some limitations of the Roberta model:
# **Language Dependency:** The Roberta model's performance can vary depending on the language it is trained on. If the model is trained on a specific language, it may not generalize well to other languages. Therefore, its effectiveness in sentiment analysis tasks may be limited to the languages it was trained on.
# **Data Bias:** The performance of the Roberta model is influenced by the training data it receives. If the training data contains biases, such as underrepresentation or overrepresentation of certain demographics or perspectives, the model may inherit those biases. This can result in biased sentiment analysis outputs, affecting the reliability and fairness of the model.
# **Contextual Understanding:** While the Roberta model has a strong understanding of contextual language, it may still struggle with certain nuances, sarcasm, or context-specific sentiments. These limitations stem from the complexities of language comprehension, and the model may not always capture the intended sentiment accurately in such cases.
# **Interpretability:** Like many deep learning models, the Roberta model's internal workings and decision-making process can be challenging to interpret. It can be challenging to understand how the model arrives at its sentiment analysis outputs, making it difficult to explain its predictions or identify potential biases or errors.
# While it offers advancements over traditional models like Vader, it is crucial to assess RoBERTa's performance within specific contexts, validate its outputs, and consider potential biases and limitations when interpreting the results.
# Overall Sentiments Analysis from the Dataset
# To calculate the overall sentiment of a dataset based on the Roberta scores, we will:
# - Take the average of the sentiment scores for each sentiment category (negative, neutral, positive) across all the data points.
# - Determine the sentiment with the highest average value as the overall sentiment of the dataset.
roberta.head(2)
new_rob = roberta[["roberta_neg", "roberta_neu", "roberta_pos"]]
# Averaging the sentiment scores per category and taking the category with the highest mean
overall_sentiment = new_rob.mean(axis=0).idxmax()
print("Overall Sentiment:", overall_sentiment)
| false | 0 | 7,985 | 1 | 7,985 | 7,985 |
||
129974150
|
<jupyter_start><jupyter_text>South Korean Lottery Numbers
### Background
The South Korean lottery pays out millions of dollars to the winners. To date, there have been over 1000 draws (1 a week). The numbers are drawn by a vacuum sucking up plastic balls with the winning numbers written on them. Many South Korean citizens speculate that this system is rigged (or at least not 100% fair) because many numbers have been chosen unproportionally. Is it possible that choosing certain numbers will improve one's chances of winning?
### Data
<ul>
<li><strong>TIME</strong> - The nth lottery draw</li>
<li><strong>NUM1</strong> - Winning number 1</li>
<li><strong>NUM2</strong> - Winning number 2</li>
<li><strong>NUM3</strong> - Winning number 3</li>
<li><strong>NUM4</strong> - Winning number 4</li>
<li><strong>NUM5</strong> - Winning number 5</li>
<li><strong>NUM6</strong> - Winning number 6</li>
<li><strong>BONUS</strong> - Winning bonus number</li>
</ul>
### Additional Info
Per draw, 6 numbers are chosen + 1 bonus number
Of the 6 primary numbers, if at least 3 are correct the ticket is a winner.
The bonus number will add a bonus if 5 out of 6 primary numbers are correct.
The order of the numbers does not matter. They will always be listed from least -> greatest.
The following needs to be taken into consideration to calculate if your model is making money:
- Each guess of lottery numbers costs about $1 (you can guess an unlimited amount of times)
- If 3 numbers match you win $5 (i.e. if you guess 5 times and only one ticket wins, you get your money back)
- If 4 numbers match you win $100
- If 5 numbers match you win $1,000
- If 5 numbers and the bonus number match you win $10,000
- If all 6 numbers are correct you get the jackpot (usually at least $100,000 -> $10M)
### Source
https://m.dhlottery.co.kr/
Kaggle dataset identifier: south-korean-lottery-numbers
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/south-korean-lottery-numbers/fake_lotto.csv")
df
import seaborn as sns
countplot = sns.countplot(data=df, x="NUM1")
num_1 = df[df.NUM1 >= 20]
by_time = num_1.groupby("TIME", as_index=False)
mean_by_time = by_time["NUM1"].mean()
mean_by_time = mean_by_time.head()
barplot = sns.barplot(x="TIME", y="NUM1", data=mean_by_time)
# ### If we take the first 5 draws where the winning NUM1 >= 20, the maximum mean value occurs at draw (TIME) number 6
by_time = df.groupby("TIME", as_index=False).NUM2.mean().head()
relplot = sns.relplot(x="TIME", y="NUM2", data=by_time)
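# Based on the payout rules listed in the dataset description, a small helper that scores
# one guessed ticket against a winning draw. This is only a sketch: the jackpot amount
# varies per draw, so it is passed in as a parameter, and the column access assumes the
# documented schema (NUM1..NUM6, BONUS).
def ticket_payout(guess, winning_numbers, bonus, jackpot=100_000):
    matches = len(set(guess) & set(winning_numbers))
    if matches == 6:
        return jackpot
    if matches == 5 and bonus in guess:
        return 10_000
    if matches == 5:
        return 1_000
    if matches == 4:
        return 100
    if matches == 3:
        return 5
    return 0


# Example: score an arbitrary ticket against the first recorded draw
first_draw = df.iloc[0]
winning = [first_draw[f"NUM{i}"] for i in range(1, 7)]
ticket_payout([1, 2, 3, 4, 5, 6], winning, first_draw["BONUS"])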
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974150.ipynb
|
south-korean-lottery-numbers
|
calebreigada
|
[{"Id": 129974150, "ScriptId": 38659267, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14989378, "CreationDate": "05/17/2023 20:57:40", "VersionNumber": 3.0, "Title": "Data_Visualization", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 40.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 33.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186415008, "KernelVersionId": 129974150, "SourceDatasetVersionId": 3233531}]
|
[{"Id": 3233531, "DatasetId": 1946000, "DatasourceVersionId": 3283627, "CreatorUserId": 5720392, "LicenseName": "CC0: Public Domain", "CreationDate": "02/27/2022 05:16:41", "VersionNumber": 2.0, "Title": "South Korean Lottery Numbers", "Slug": "south-korean-lottery-numbers", "Subtitle": "Determining the best numbers to choose in the South Korean Lottery", "Description": "### Background\nThe South Korean lottery pays out millions of dollars to the winners. To date, there have been over 1000 draws (1 a week). The numbers are drawn by a vacuum sucking up plastic balls with the winning numbers written on them. Many South Korean citizens speculate that this system is rigged (or at least not 100% fair) because many numbers have been chosen unproportionally. Is it possible that choosing certain numbers will improve one's chances of winning?\n\n### Data\n<ul>\n<li><strong>TIME</strong> - The nth lottery draw</li>\n<li><strong>NUM1</strong> - Winning number 1</li>\n<li><strong>NUM2</strong> - Winning number 2</li>\n<li><strong>NUM3</strong> - Winning number 3</li>\n<li><strong>NUM4</strong> - Winning number 4</li>\n<li><strong>NUM5</strong> - Winning number 5</li>\n<li><strong>NUM6</strong> - Winning number 6</li>\n<li><strong>BONUS</strong> - Winning bonus number</li>\n</ul>\n\n\n### Additional Info\nPer draw, 6 numbers are chosen + 1 bonus number\nOf the 6 primary numbers, if at least 3 are correct the ticket is a winner.\nThe bonus number will add a bonus if 5 out of 6 primary numbers are correct.\nThe order of the numbers do not matter. They will always be from least->greatest.\n\nThe following needs to be taken into consideration to calculate if your model is making money:\n-Each guess of lottery numbers costs about $1 (you can guess an unlimited amount of times)\n-If 3 numbers match you win $5 (ie. if you guess 5 times and only one ticket wins, you get your money back)\n-if 4 numbers match you win $100\n-if 5 numbers match you win $1,000\n-if 5 numbers and the bonus number match you win $10,000\n-if all 6 numbers are correct you get the jackpot (usually at least $100,000-> $10M)\n\n\n\n### Source\nhttps://m.dhlottery.co.kr/", "VersionNotes": "Data Update 2022/02/27", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1946000, "CreatorUserId": 5720392, "OwnerUserId": 5720392.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3233531.0, "CurrentDatasourceVersionId": 3283627.0, "ForumId": 1969854, "Type": 2, "CreationDate": "02/20/2022 10:08:58", "LastActivityDate": "02/20/2022", "TotalViews": 9985, "TotalDownloads": 762, "TotalVotes": 34, "TotalKernels": 3}]
|
[{"Id": 5720392, "UserName": "calebreigada", "DisplayName": "Caleb Reigada", "RegisterDate": "09/04/2020", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/south-korean-lottery-numbers/fake_lotto.csv")
df
import seaborn as sns
countplot = sns.countplot(data=df, x="NUM1")
num_1 = df[df.NUM1 >= 20]
by_time = num_1.groupby("TIME", as_index=False)
mean_by_time = by_time["NUM1"].mean()
mean_by_time = mean_by_time.head()
barplot = sns.barplot(x="TIME", y="NUM1", data=mean_by_time)
# ### If we take the first 5 draws where the winning NUM1 >= 20, the maximum mean value occurs at draw (TIME) number 6
by_time = df.groupby("TIME", as_index=False).NUM2.mean().head()
relplot = sns.relplot(x="TIME", y="NUM2", data=by_time)
| false | 1 | 413 | 1 | 993 | 413 |
||
129974788
|
# # What is HuggingFace's Diffusers ?
# > Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. It is a modular toolbox that can be used to build our own inference solutions or even to train our own diffusion models.
# > Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio.
# Diffusers consists of three main components
# 1. State-of-the-art diffusion pipelines for inference. The DiffusionPipeline is a high-level, end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
# 2. Interchangeable noise schedulers for balancing trade-offs between generation speed and quality. Schedulers are algorithms that control how noise is added during training and how denoised images are generated during inference.
# 3. Pretrained models and popular model architectures that can be used as building blocks and combined with schedulers to create our own end-to-end diffusion systems.
# Hence, we first learn how to use the DiffusionPipeline for inference, and then go through how to combine a model and a scheduler to replicate what is happening inside the DiffusionPipeline (a sketch of this is included at the end of this notebook).
# Make sure to install all of the required libraries:
# 1. Diffusers for the pipelines
# 2. Accelerate to speed up model loading for inference and training
# 3. Transformers, required for running the most popular diffusion models such as Stable Diffusion
# The DiffusionPipeline lets us use a pretrained diffusion system for inference. It is an end-to-end system containing a model and a scheduler, and it can be used out of the box for many tasks, such as:
# 1. Unconditional Image Generation : generate an image from Gaussian noise
# 2. Text-Guided Image Generation : generate an image given a text prompt
# 3. Text-Guided Image-to-Image Translation : adapt an image guided by a text prompt
# 4. Text-Guided Image-Inpainting : fill the masked part of an image given the image, the mask and a text prompt
# 5. Text-Guided Depth-to-Image Translation : adapt parts of an image guided by a text prompt while preserving structure via depth estimation
# We will start with the following steps
# 1. Creating an instance of a DiffusionPipeline and also specifying which pipeline checkpoint we want to download.
# Import the important class, DiffusionPipeline, from diffusers
from diffusers import DiffusionPipeline
# Load the model with from_pretrained method
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Now, the DiffusionPipeline downloads and caches all of the modeling, tokenization and scheduling components; the Stable Diffusion pipeline is composed of a UNet2DConditionModel, a PNDMScheduler, and other components.
# Note: please try to run these pipelines on a GPU, because the Stable Diffusion model roughly consists of 1.4 billion parameters; therefore we move the pipeline object to the GPU, just like in PyTorch.
pipeline.to("cuda")
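# The scheduler component mentioned above is interchangeable without retraining the model.
# A minimal sketch of swapping it (EulerDiscreteScheduler is one of the schedulers that works
# with the Stable Diffusion v1-5 checkpoint; this particular choice is just an example):
from diffusers import EulerDiscreteScheduler

pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)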
# Now, we will pass a text prompt to our Stable Diffusion pipeline to generate an image and then access the denoised image.
# Note: by default, the image output is wrapped in a `PIL.Image` object.
image = pipeline(
"An Image of the cow in a green field taking bath in the river"
).images[0]
image
# to save images
image.save("Image_of_cow_in_river.png")
image = pipeline("An Image of children taking bath in canal as fun").images[0]
image
image = pipeline("An Image of lord ram in ancient time with his dharma patni").images[0]
image
image = pipeline("An Image of Lord ram in the modern avatar").images[0]
image
image = pipeline("An Image of british brutality on indians").images[0]
image
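# As mentioned at the start, the high-level DiffusionPipeline can be replicated by combining a
# model and a scheduler by hand. A minimal unconditional-generation sketch, assuming the
# "google/ddpm-cat-256" checkpoint as an example (any UNet2DModel checkpoint works similarly);
# the number of inference steps and the output post-processing are just reasonable defaults:
import torch
from diffusers import UNet2DModel, DDPMScheduler

unet = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
scheduler.set_timesteps(50)

# start from pure Gaussian noise and denoise it step by step
sample = torch.randn(1, 3, unet.config.sample_size, unet.config.sample_size).to("cuda")
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = unet(sample, t).sample  # predict the noise at this timestep
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # remove a little noise

# rescale from [-1, 1] to [0, 255] and view the result as a PIL image
from PIL import Image
import numpy as np

array = (sample / 2 + 0.5).clamp(0, 1)[0].permute(1, 2, 0).cpu().numpy()
Image.fromarray((array * 255).astype("uint8"))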
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974788.ipynb
| null | null |
[{"Id": 129974788, "ScriptId": 38663988, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8324004, "CreationDate": "05/17/2023 21:07:05", "VersionNumber": 2.0, "Title": "Diffusors_DiffusionPipeline", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 65.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 65.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
# # What is HuggingFace's Diffusers ?
# > Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. It is a modular toolbox that can be used to build our own inference solutions or even to train our own diffusion models.
# > Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio.
# Diffusers consists of three main components
# 1. State-of-the-art diffusion pipelines for inference. The DiffusionPipeline is a high-level, end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
# 2. Interchangeable noise schedulers for balancing trade-offs between generation speed and quality. Schedulers are algorithms that control how noise is added during training and how denoised images are generated during inference.
# 3. Pretrained models and popular model architectures that can be used as building blocks and combined with schedulers to create our own end-to-end diffusion systems.
# Hence, we first learn how to use the DiffusionPipeline for inference, and then go through how to combine a model and a scheduler to replicate what is happening inside the DiffusionPipeline.
# Make sure to install all of the required libraries:
# 1. Diffusers for the pipelines
# 2. Accelerate to speed up model loading for inference and training
# 3. Transformers, required for running the most popular diffusion models such as Stable Diffusion
# The DiffusionPipeline lets us use a pretrained diffusion system for inference. It is an end-to-end system containing a model and a scheduler, and it can be used out of the box for many tasks, such as:
# 1. Unconditional Image Generation : generate an image from Gaussian noise
# 2. Text-Guided Image Generation : generate an image given a text prompt
# 3. Text-Guided Image-to-Image Translation : adapt an image guided by a text prompt
# 4. Text-Guided Image-Inpainting : fill the masked part of an image given the image, the mask and a text prompt
# 5. Text-Guided Depth-to-Image Translation : adapt parts of an image guided by a text prompt while preserving structure via depth estimation
# We will start with the following steps
# 1. Creating an instance of a DiffusionPipeline and also specifying which pipeline checkpoint we want to download.
# Import the important class, DiffusionPipeline, from diffusers
from diffusers import DiffusionPipeline
# Load the model with from_pretrained method
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Now, the DiffusionPipeline downloads and caches all of the modeling, tokenization and scheduling components; the Stable Diffusion pipeline is composed of a UNet2DConditionModel, a PNDMScheduler, and other components.
# Note: please try to run these pipelines on a GPU, because the Stable Diffusion model roughly consists of 1.4 billion parameters; therefore we move the pipeline object to the GPU, just like in PyTorch.
pipeline.to("cuda")
# Now, we will pass a text prompt to our Stable Diffusion pipeline to generate an image and then access the denoised image.
# Note: by default, the image output is wrapped in a `PIL.Image` object.
image = pipeline(
"An Image of the cow in a green field taking bath in the river"
).images[0]
image
# to save images
image.save("Image_of_cow_in_river.png")
image = pipeline("An Image of children taking bath in canal as fun").images[0]
image
image = pipeline("An Image of lord ram in ancient time with his dharma patni").images[0]
image
image = pipeline("An Image of Lord ram in the modern avatar").images[0]
image
image = pipeline("An Image of british brutality on indians").images[0]
image
| false | 0 | 932 | 2 | 932 | 932 |
||
129974198
|
from auptitcafe.menus import Menus
import pandas as pd
menu_instance = Menus()
menus = "menus.csv"
menu_instance.to_csv(menus)
df = pd.read_csv(menus)
df
import duckdb
con = duckdb.connect(database="auptitcafe.duckdb", read_only=False)
con.execute(
"""create or replace table menus(
titre_plat varchar not null,
prix integer not null,
category varchar not null check (category in ('PLAT','DESSERT')),
recette varchar not null,
image_url varchar not null
);"""
)
con.execute("""insert into menus SELECT * from 'menus.csv';""")
df = con.query(
"""select * from menus
where category = 'PLAT';"""
).to_df()
df
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974198.ipynb
| null | null |
[{"Id": 129974198, "ScriptId": 38393360, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12771330, "CreationDate": "05/17/2023 20:58:18", "VersionNumber": 6.0, "Title": "Au p'tit caf\u00e9 (pypi package)", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 31.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 15.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
from auptitcafe.menus import Menus
import pandas as pd
menu_instance = Menus()
menus = "menus.csv"
menu_instance.to_csv(menus)
df = pd.read_csv(menus)
df
import duckdb
con = duckdb.connect(database="auptitcafe.duckdb", read_only=False)
con.execute(
"""create or replace table menus(
titre_plat varchar not null,
prix integer not null,
category varchar not null check (category in ('PLAT','DESSERT')),
recette varchar not null,
image_url varchar not null
);"""
)
con.execute("""insert into menus SELECT * from 'menus.csv';""")
df = con.query(
"""select * from menus
where category = 'PLAT';"""
).to_df()
df
| false | 0 | 201 | 0 | 201 | 201 |
||
129974277
|
# 
# trust game
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# Reset rcParams to default values
mpl.rcParams.update(mpl.rcParamsDefault)
# globally setting seaborn
sns.set(style="ticks", palette="muted", font_scale=1.2, context="talk")
mpl.rcParams["lines.linewidth"] = 2
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["font.size"] = 16
dirname = "/kaggle/input/economic-games-ba/"
sl_all_avg = pd.read_csv(dirname + "sl_all_avg.csv")
sl_both75_avg = pd.read_csv(dirname + "sl_both75_avg.csv")
sl_both75_unfold = pd.read_csv(dirname + "sl_both75_unfold.csv")
sl_dg75_avg = pd.read_csv(dirname + "sl_dg75_avg.csv")
sl_dg75_unfold = pd.read_csv(dirname + "sl_dg75_unfold.csv")
sl_tg75_avg = pd.read_csv(dirname + "sl_tg75_avg.csv")
sl_tg75_unfold = pd.read_csv(dirname + "sl_tg75_unfold.csv")
# TG by gender
sl_all_avg[sl_all_avg["Gender"] == "Female"]["TG_trustor"].hist()
sl_all_avg[sl_all_avg["Gender"] == "Male"]["TG_trustor"].hist()
plt.legend(["F", "M"])
plt.title("Histogram of TG_trustor by Gender")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974277.ipynb
| null | null |
[{"Id": 129974277, "ScriptId": 38662336, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15152859, "CreationDate": "05/17/2023 20:59:35", "VersionNumber": 1.0, "Title": "EDA on decisions of strategic uncertainty", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 41.0, "LinesInsertedFromPrevious": 41.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# 
# trust game
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# Reset rcParams to default values
mpl.rcParams.update(mpl.rcParamsDefault)
# globally setting seaborn
sns.set(style="ticks", palette="muted", font_scale=1.2, context="talk")
mpl.rcParams["lines.linewidth"] = 2
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["font.size"] = 16
dirname = "/kaggle/input/economic-games-ba/"
sl_all_avg = pd.read_csv(dirname + "sl_all_avg.csv")
sl_both75_avg = pd.read_csv(dirname + "sl_both75_avg.csv")
sl_both75_unfold = pd.read_csv(dirname + "sl_both75_unfold.csv")
sl_dg75_avg = pd.read_csv(dirname + "sl_dg75_avg.csv")
sl_dg75_unfold = pd.read_csv(dirname + "sl_dg75_unfold.csv")
sl_tg75_avg = pd.read_csv(dirname + "sl_tg75_avg.csv")
sl_tg75_unfold = pd.read_csv(dirname + "sl_tg75_unfold.csv")
# TG by gender
sl_all_avg[sl_all_avg["Gender"] == "Female"]["TG_trustor"].hist()
sl_all_avg[sl_all_avg["Gender"] == "Male"]["TG_trustor"].hist()
plt.legend(["F", "M"])
plt.title("Histogram of TG_trustor by Gender")
| false | 0 | 454 | 0 | 454 | 454 |
||
129974168
|
<jupyter_start><jupyter_text>Intel Image Classification
### Context
This is image data of Natural Scenes around the world.
### Content
This Data contains around 25k images of size 150x150 distributed under 6 categories.
{'buildings' -> 0,
'forest' -> 1,
'glacier' -> 2,
'mountain' -> 3,
'sea' -> 4,
'street' -> 5 }
The Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.
This data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.
Kaggle dataset identifier: intel-image-classification
<jupyter_script># # Assignment-05 Convolutional Neural Networks
# ### Students:
# - Sharon Sarai Maygua Mendiola
# - Franklin Ruben Rosembluth Prado
# Utils to run notebook on Kaggle
import os
import cv2
import glob
import pickle
import matplotlib
import numpy as np
import pandas as pd
import imageio as im
import seaborn as sns
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow import keras
from keras import models
from pickle import dump
from pickle import load
from tensorflow import keras
from tensorflow.keras.utils import (
to_categorical,
plot_model,
img_to_array,
load_img,
array_to_img,
)
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
Activation,
BatchNormalization,
)
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import layers
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
# from keras.preprocessing import image
import keras.utils as image
# load and save files with pickle
def save_pickle(file, file_name):
dump(file, open(file_name, "wb"))
print("Saved: %s" % file_name)
def load_pickle(file_name):
return load(open(file_name, "rb"))
# PATHS
# path to the folder containing the subfolders with the training images
trainpath = "/kaggle/input/intel-image-classification/seg_train/seg_train"
# path to the folder containing the subfolders with the testing images
testpath = "/kaggle/input/intel-image-classification/seg_test/seg_test"
predpath = "/kaggle/input/intel-image-classification/seg_pred/seg_pred"
# TensorFlow dataset creation from directory, with one-hot (categorical) labels
# Not used later, because we wanted to learn how to label our images on our own
train_ds = image_dataset_from_directory(
trainpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical"
)
test_ds = image_dataset_from_directory(
testpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical"
)
print("Train class names:", train_ds.class_names)
print("Test class names:", test_ds.class_names)
plt.figure(figsize=(5, 5))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
label = tf.argmax(labels[i]).numpy()
plt.title(train_ds.class_names[label])
plt.axis("off")
# # Labeling
# - This dataset needed some pre-processing.
# - The images were generally labeled, since they were in categorized folders. However, for training it is necessary that each image is associated with its label, so each of the training and test images was labeled.
# - The validation images could not be processed in this way because they were not categorized.
# With this objective, the *'def labeling'* function was created, which also transforms the text labels to numeric labels and converts the lists in which the images and labels had been stored, into numpy arrays of type (float32) and type (int32).
# This is because working with this type of data reduces the amount of storage memory, improves model performance, and because Keras needs its input data to be of this type.
# Also, to keep the input size manageable, we resize all the images to (150x150) inside the labeling function, so that they can be normalized once the labeling of each image is complete.
# Create a dictionary to change text labels into int numerical labels (Ordered alphabetically)
class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"]
class_labels = {class_name: i for i, class_name in enumerate(class_names)}
print(class_labels)
# Resize of images
IMAGE_SIZE = (150, 150)
# def for labeling
def labeling(folder_path, images, labels):
# loop through all subfolders in the folder_path
for label in os.listdir(folder_path):
# get the path to the subfolder
label_path = os.path.join(folder_path, label)
# convert label text to label number
label_number = class_labels[label]
# loop through all images in subfolder
for file_name in os.listdir(label_path):
# upload image using Pillow
image = Image.open(os.path.join(label_path, file_name))
# resize image to desired size
image = image.resize(IMAGE_SIZE)
# convert the image to a Numpy array
image = np.array(image)
# add image to testing_image list
images.append(image)
# add image label to testing_label list
labels.append(label_number)
# convert the images and labels list to numpy array
images = np.array(images, dtype="float32")
labels = np.array(labels, dtype="int32")
return images, labels
# # Data Visualization
# In this section you can see the results of the labeling.
# An image of the training set is plotted and its label is printed, both are consistent.
# Training labeling
# list to store the images and their labels
training_images = []
training_labels = []
x_train, y_train = labeling(trainpath, training_images, training_labels)
# Testing labeling
# list to store the images and their labels
testing_images = []
testing_labels = []
x_test, y_test = labeling(testpath, testing_images, testing_labels)
plt.imshow(training_images[5])
print(f"label: {training_labels[5]}, name: {class_names[training_labels[5]]}")
# # Data preparation
# - This part of the code deals with one-hot encoding, normalization, and splitting of the data.
# In the first part, we find the number of unique classes in the training label set and then convert the categorical labels into a one-hot encoded representation for the training and test label sets.
# Find the unique numbers from the train labels
num_clases = len(np.unique(y_train))
# Change the labels from categorical to one-hot encoding
y_train = to_categorical(y_train, num_clases)
y_test = to_categorical(y_test, num_clases)
# Visualize y_train after one hot encoding
y_train[0]
# ### Normalization
# In the second part, the train and test images are normalized to make sure that all images have comparable pixel values in a manageable range.
# This helps to improve the accuracy of the model and to reduce the variance of the input data.
# The normalization being used here is known as **"Z-score normalization"** or **"standard normalization"**.
# The mean and standard deviation of the training data are calculated and then used to scale both the training and test data, the formula used is: **(x - mean) / standard deviation**
# This normalization centers the data at zero and scales the units to have a variance of one. The constant 1e-7 is added to the denominator to avoid a possible division by zero in case the standard deviation is very small.
# Using Z-score normalization to converge faster and improve accuracy
mean = np.mean(x_train)
std = np.std(x_train)
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)
# Finally, 10 percent of the **train set** is separated for validation since the set destined for validation was not labeled.
# We chose to do the validation in this way (training by sending the train data and validation) to save time, make better use of the data, detect overfitting problems early and optimize the overall performance of the model.
# Split the training data into train and validation sets
x_train, x_valid, y_train, y_valid = train_test_split(
x_train, y_train, test_size=0.1, random_state=13
)
print("train splitted:", x_train.shape[0])
print("val:", x_valid.shape[0])
print("test:", x_test.shape[0])
# # Building our Convnet Model
# Convnet architecture:
# Having the input images ready, these images go through a convolution network that extracts features (edges, textures, etc.) at first in a very superficial way and then, as it goes deeper into the network, much more complex features are extracted.
# These convolution layers are linked to a maxpooling layer that reduces complexity by limiting the length and width of the images. And so layer after layer of stacked convolutions with maxpooling will give us back an image that is smaller and smaller but deeper in its meaning and information.
# Next, a layer called Flatten is applied to flatten the feature maps into a one-dimensional vector.
# ### General parameters:
# **Kernel (filters)**:
# We know that images are understood as arrays of pixels.
# The kernel is also a matrix (but much smaller) that slides from the upper-left corner to the lower-right corner of the image, step by step, covering the entire image while performing a small mathematical operation called convolution. At each position, an element-wise multiplication and sum is computed, which extracts patterns from that patch of the image.
# The result of this convolution is a new image (a feature map) with certain features highlighted. Thus, the objective of the filters is to detect features of the input images.
# In our model we start with 32 filters in the first layer and increase the number in the following layers. A small numeric sketch of the operation follows below.
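# To make the sliding-window idea concrete, here is a tiny NumPy sketch of a single-channel
# "valid" convolution (stride 1, no padding). A real Conv2D layer does this for every filter
# and every input channel at once, and adds a learned bias; this is only an illustration.
def conv2d_single(image_2d, kernel_2d):
    kh, kw = kernel_2d.shape
    out_h = image_2d.shape[0] - kh + 1
    out_w = image_2d.shape[1] - kw + 1
    out = np.zeros((out_h, out_w), dtype="float32")
    for i in range(out_h):
        for j in range(out_w):
            # element-wise multiply the patch with the kernel and sum the result
            out[i, j] = np.sum(image_2d[i : i + kh, j : j + kw] * kernel_2d)
    return out


demo_image = np.arange(25, dtype="float32").reshape(5, 5)
vertical_edge_kernel = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype="float32")
conv2d_single(demo_image, vertical_edge_kernel)  # returns a (3, 3) feature map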
# **Padding:**
# It is a margin added around the image so that the convolution operation does not reduce its size; 'same' padding keeps the spatial dimensions of the output equal to those of the input.
# In our model we use padding = 'same'.
# **Maxpooling:**
# It reduces the size of the feature maps resulting from the convolution by keeping only the maximum value in each pooling window, i.e. the most salient activations.
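# An equally small sketch of 2x2 max pooling with stride 2 on a single channel, which is what
# MaxPooling2D(pool_size=(2, 2)) does per channel (illustration only):
def max_pool_2x2(feature_map):
    h, w = feature_map.shape
    trimmed = feature_map[: h - h % 2, : w - w % 2]  # drop an odd trailing row/column if any
    return trimmed.reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))


demo_map = np.arange(16, dtype="float32").reshape(4, 4)
max_pool_2x2(demo_map)  # (2, 2) result keeping the maximum of each 2x2 block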
# ### Parameters of the optimized models:
# **Regularizers:**
# We use the L2 regularization (which controls the magnitude of the weights)
# kernel_regularizer=regularizers.l2(w_regularizer) is used to apply L2 regularization to the weights of a convolutional layer on a CNN. It helps to avoid overfitting and improve the generalizability of the model by penalizing large weights.
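# Concretely (a standard formulation; lambda corresponds to w_regulatizer defined below), L2 adds a
# penalty term to the training loss: $L_{total} = L_{data} + \lambda \sum_i w_i^2$, which pushes the
# weights towards small values.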
# **Batch normalization:**
# It is a normalization applied within the hidden layers throughout training: since the weights vary constantly, the intermediate values can be standardized inside the network. This facilitates gradient descent and works in batches. The result is standardized data even within the network during training.
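# In formulas (standard batch normalization, computed per mini-batch): $\hat{x} = (x - \mu_B) / \sqrt{\sigma_B^2 + \epsilon}$,
# followed by a learned scale and shift, $y = \gamma \hat{x} + \beta$.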
# After the convolutional blocks, the Flatten layer brings the resulting tensor down to one dimension; having only one dimension, the classification is done by stacking dense layers, as in a typical fully connected neural network.
#
filter_batch = 32
w_regulatizer = 1e-4
model = Sequential()
## conv 1
model.add(
Conv2D(
filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
input_shape=x_train.shape[1:],
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
## conv 2
model.add(
Conv2D(
filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
## conv 3
model.add(
Conv2D(
2 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
## conv 4
model.add(
Conv2D(
2 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
## conv 5
model.add(
Conv2D(
4 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
## conv 6
model.add(
Conv2D(
4 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
## Clasificacion - Flatten
model.add(Flatten())
model.add(Dense(num_clases, activation="softmax"))
# summary of the created convnet
# model.summary()
# This is the plotted summary of our first convnet:
plot_model_file = "first_model.png"
plot_model(model, to_file=plot_model_file, show_shapes=True)
# As we can see from the summary of our model, the first convnet takes the images already resized to 150x150, and each max-pooling step halves the spatial size, going from 150 pixels down to 18 pixels (150 -> 75 -> 37 -> 18).
# This shows why it is important to understand how far the size can be reduced: if we shrink the feature maps down to only 1 or 2 pixels, too much spatial information is lost and the prediction accuracy could suffer.
# ### Data Augmentation
# By using the ImageDataGenerator and its parameters, we generate augmented images on the fly during training.
# These augmented images are then used to train the model, helping it generalize and improving performance on unseen images.
## Data augmentation
datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
)
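# A quick visual sanity check of the generator defined above (a sketch: the inverse of the
# z-score normalisation applied earlier is used only to obtain displayable pixel values, so
# the colours are approximate):
aug_iter = datagen.flow(x_train[:1], batch_size=1)
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax in axes:
    augmented = next(aug_iter)[0]  # one augmented copy of the same image
    ax.imshow(np.clip(augmented * (std + 1e-7) + mean, 0, 255).astype("uint8"))
    ax.axis("off")
plt.show()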
# # Compiling our first model
# Using the Adam optimizer and the categorical_crossentropy loss function
model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
# **Callbacks:** early stopping and checkpoints
# Early Stopping:
# Early stopping monitors the metric we set in 'monitor'; if this metric stops improving, training can be ended early, before the network starts to diverge.
# Checkpoint:
# With this, training runs for all of the epochs we defined, but the file only keeps the weights of the epoch that achieved the best validation accuracy. This ensures that the saved model is always the best one seen so far.
checkpoint_model_name = "my_first_model.hdf5"
checkpoint = ModelCheckpoint(
checkpoint_model_name, verbose=1, save_best_only=True, monitor="val_accuracy"
)
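# The text above also mentions early stopping, which this notebook does not actually wire in.
# A minimal sketch of how it could be added (monitoring the same validation accuracy):
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(
    monitor="val_accuracy", patience=5, restore_best_weights=True, verbose=1
)
# it would then be passed to fit() alongside the checkpoint: callbacks=[checkpoint, early_stop]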
BATCH_SIZE = 128
EPOCHS = 40
history_first_model = model.fit(
datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
callbacks=[checkpoint],
steps_per_epoch=x_train.shape[0] // BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=(x_valid, y_valid),
)
# Defining the accuracy and loss plotting function
def plot_loss_accuracy(hist, save_image_filename, len_epochs):
epochs = len_epochs
acc = hist.history["accuracy"]
val_acc = hist.history["val_accuracy"]
loss = hist.history["loss"]
val_loss = hist.history["val_loss"]
epochs_range = range(epochs)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, "r--", label="Training Accuracy")
plt.plot(epochs_range, val_acc, "b-", label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, "r--", label="Training Loss")
plt.plot(epochs_range, val_loss, "b-", label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.savefig(save_image_filename)
plt.show()
# call the function to plot the curves
plot_loss_accuracy(history_first_model, "loss_accuracy_40.png", EPOCHS)
# Saving history of our first model fit
save_first_history_model_file = "history_first_model.pkl"
save_pickle(history_first_model, save_first_history_model_file)
# Saving our first model
save_first_model_file = "first-model-40-epochs.pkl"
save_pickle(model, save_first_model_file)
# Loading the saved training history and plotting it
saved_history_first_model = load_pickle("/kaggle/working/history_first_model.pkl")
plot_loss_accuracy(saved_history_first_model, "loss_accuracy_40_saved.png", EPOCHS)
# Loading our saved first model
saved_first_model = load_pickle("/kaggle/working/first-model-40-epochs.pkl")
# # Visualizing Intermediate Activations
img_path = (
"/kaggle/input/intel-image-classification/seg_test/seg_test/glacier/20253.jpg"
)
img = image.load_img(img_path, target_size=IMAGE_SIZE)
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.0
plt.imshow(img_tensor[0])
plt.show()
print(img_tensor.shape)
# Loading our first model
# predicting images
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
# classes = my_pickle_model.predict_classes(images, batch_size=10)
predict_x = saved_first_model.predict(images)
classes_x = np.argmax(predict_x, axis=1)
print("Predicted class is:", classes_x)
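# For readability, the predicted index can be mapped back to its text label using the
# class_names list defined earlier:
print("Predicted label:", class_names[int(classes_x[0])])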
# ### Instantiating a model from an input tensor and a list of output tensors
# Extracts the outputs of the top 12 layers
layer_outputs = [layer.output for layer in saved_first_model.layers[:12]]
# Creates a model that will return these outputs, given the model input
activation_model = Model(inputs=saved_first_model.input, outputs=layer_outputs)
# Returns a list of five Numpy arrays: one array per layer activation
activations = activation_model.predict(img_tensor)
first_layer_activation = activations[0]
print(first_layer_activation.shape)
plt.matshow(first_layer_activation[0, :, :, 4], cmap="viridis")
# ### Visualizing every channel in every intermediate activation
layer_names = []
for layer in saved_first_model.layers[:12]:
layer_names.append(
layer.name
) # Names of the layers, so you can have them as part of your plot
images_per_row = 16
for layer_name, layer_activation in zip(
layer_names, activations
): # Displays the feature maps
n_features = layer_activation.shape[-1] # Number of features in the feature map
size = layer_activation.shape[
1
] # The feature map has shape (1, size, size, n_features).
n_cols = (
n_features // images_per_row
) # Tiles the activation channels in this matrix
display_grid = np.zeros((size * n_cols, images_per_row * size))
for col in range(n_cols): # Tiles each filter into a big horizontal grid
for row in range(images_per_row):
channel_image = layer_activation[0, :, :, col * images_per_row + row]
channel_image -= (
channel_image.mean()
) # Post-processes the feature to make it visually palatable
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255).astype("uint8")
display_grid[
col * size : (col + 1) * size, # Displays the grid
row * size : (row + 1) * size,
] = channel_image
scale = 1.0 / size
plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect="auto", cmap="viridis")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974168.ipynb
|
intel-image-classification
|
puneet6060
|
[{"Id": 129974168, "ScriptId": 38658728, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10470120, "CreationDate": "05/17/2023 20:57:56", "VersionNumber": 2.0, "Title": "Final-Assignment-05", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 476.0, "LinesInsertedFromPrevious": 324.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186415064, "KernelVersionId": 129974168, "SourceDatasetVersionId": 269359}]
|
[{"Id": 269359, "DatasetId": 111880, "DatasourceVersionId": 281586, "CreatorUserId": 2307235, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/30/2019 09:22:58", "VersionNumber": 2.0, "Title": "Intel Image Classification", "Slug": "intel-image-classification", "Subtitle": "Image Scene Classification of Multiclass", "Description": "### Context\n\nThis is image data of Natural Scenes around the world. \n\n### Content\n\nThis Data contains around 25k images of size 150x150 distributed under 6 categories.\n{'buildings' -> 0, \n'forest' -> 1,\n'glacier' -> 2,\n'mountain' -> 3,\n'sea' -> 4,\n'street' -> 5 }\n\nThe Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.\nThis data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.\n\n\n### Acknowledgements\n\nThanks to https://datahack.analyticsvidhya.com for the challenge and Intel for the Data\n\nPhoto by [Jan B\u00f6ttinger on Unsplash][1]\n\n### Inspiration\n\nWant to build powerful Neural network that can classify these images with more accuracy.\n\n\n [1]: https://unsplash.com/photos/27xFENkt-lc", "VersionNotes": "Added Prediction Images", "TotalCompressedBytes": 108365415.0, "TotalUncompressedBytes": 361713334.0}]
|
[{"Id": 111880, "CreatorUserId": 2307235, "OwnerUserId": 2307235.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 269359.0, "CurrentDatasourceVersionId": 281586.0, "ForumId": 121691, "Type": 2, "CreationDate": "01/29/2019 10:37:42", "LastActivityDate": "01/29/2019", "TotalViews": 441103, "TotalDownloads": 83887, "TotalVotes": 1345, "TotalKernels": 815}]
|
[{"Id": 2307235, "UserName": "puneet6060", "DisplayName": "Puneet Bansal", "RegisterDate": "10/01/2018", "PerformanceTier": 0}]
|
# # Assignment-05 Convolutional Neural Networks
# ### Students:
# - Sharon Sarai Maygua Mendiola
# - Franklin Ruben Rosembluth Prado
# Utils to run notebook on Kaggle
import os
import cv2
import glob
import pickle
import matplotlib
import numpy as np
import pandas as pd
import imageio as im
import seaborn as sns
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow import keras
from keras import models
from pickle import dump
from pickle import load
from tensorflow import keras
from tensorflow.keras.utils import (
to_categorical,
plot_model,
img_to_array,
load_img,
array_to_img,
)
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
Activation,
BatchNormalization,
)
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import layers
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam, RMSprop
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
# from keras.preprocessing import image
import keras.utils as image
# load and save files with pickle
def save_pickle(file, file_name):
dump(file, open(file_name, "wb"))
print("Saved: %s" % file_name)
def load_pickle(file_name):
return load(open(file_name, "rb"))
# PATHS
# path to the folder containing the subfolders with the training images
trainpath = "/kaggle/input/intel-image-classification/seg_train/seg_train"
# path to the folder containing the subfolders with the testing images
testpath = "/kaggle/input/intel-image-classification/seg_test/seg_test"
predpath = "/kaggle/input/intel-image-classification/seg_pred/seg_pred"
# Tensorflow datasets creator from directory, making images to categorical
# Not used, because we wanted to learn how to label our images by our own
train_ds = image_dataset_from_directory(
trainpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical"
)
test_ds = image_dataset_from_directory(
testpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical"
)
print("Train class names:", train_ds.class_names)
print("Test class names:", test_ds.class_names)
plt.figure(figsize=(5, 5))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
label = tf.argmax(labels[i]).numpy()
plt.title(train_ds.class_names[label])
plt.axis("off")
# # Labeling
# - This dataset needed some pre-processing.
# - The images were generally labeled, since they were in categorized folders. However, for training it is necessary that each image is associated with its label, so each of the training and test images was labeled.
# - The validation images could not be processed in this way because they were not categorized.
# With this objective, the *'def labeling'* function was created, which also transforms the text labels to numeric labels and converts the lists in which the images and labels had been stored, into numpy arrays of type (float32) and type (int32).
# This is because working with this type of data reduces the amount of storage memory, improves model performance, and because Keras needs its input data to be of this type.
# Also to reduce the amount of the images size, we resized all the images with (150x150) in labeling to normalize after concludes the labels of each image.
# Create a dictionary to change text labels into int numerical labels (Ordered alphabetically)
class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"]
class_labels = {class_name: i for i, class_name in enumerate(class_names)}
print(class_labels)
# Resize of images
IMAGE_SIZE = (150, 150)
# def for labeling
def labeling(folder_path, images, labels):
# loop through all subfolders in the folder_path
for label in os.listdir(folder_path):
# get the path to the subfolder
label_path = os.path.join(folder_path, label)
# convert label text to label number
label_number = class_labels[label]
# loop through all images in subfolder
for file_name in os.listdir(label_path):
# upload image using Pillow
image = Image.open(os.path.join(label_path, file_name))
# resize image to desired size
image = image.resize(IMAGE_SIZE)
# convert the image to a Numpy array
image = np.array(image)
# add image to testing_image list
images.append(image)
# add image label to testing_label list
labels.append(label_number)
# convert the images and labels list to numpy array
images = np.array(images, dtype="float32")
labels = np.array(labels, dtype="int32")
return images, labels
# # Data Visualization
# In this section you can see the results of the labeling.
# An image of the training set is plotted and its label is printed, both are consistent.
# Training labeling
# list to store the images and their labels
training_images = []
training_labels = []
x_train, y_train = labeling(trainpath, training_images, training_labels)
# Testing labeling
# list to store the images and their labels
testing_images = []
testing_labels = []
x_test, y_test = labeling(testpath, testing_images, testing_labels)
plt.imshow(training_images[5])
print(f"label: {training_labels[5]}, name: {class_names[training_labels[5]]}")
# # Data preparation
# - This part of the code has to be mean with hot-encodes, normalization, and splits of the data.
# In the first part, we find the number of unique classes in the training tag set and then converts the categorical tags into a one-hot encoding representation for the training and test tag sets.
# Find the unique numbers from the train labels
num_clases = len(np.unique(y_train))
# Change the labels from categorical to one-hot encoding
y_train = to_categorical(y_train, num_clases)
y_test = to_categorical(y_test, num_clases)
# Visualize y_train after one hot encoding
y_train[0]
# ### Normalization
# Second Part, the train and test images are normalized to make sure that all images have comparable pixel values and are in a manageable range.
# This helps to improve the accuracy of the model and to reduce the variance of the input data.
# The normalization being used here is known as **"Z-score normalization"** or **"standard normalization"**.
# The mean and standard deviation of the training data are calculated and then used to scale both the training and test data, the formula used is: **(x - mean) / standard deviation**
# This normalization centers the data at zero and scales the units to have a variance of one. The constant 1e-7 is added to the denominator to avoid a possible division by zero in case the standard deviation is very small.
# Using Z-score normalization to converge faster and improve accuracy
mean = np.mean(x_train)
std = np.std(x_train)
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)
# Finally, 10 percent of the **train set** is separated for validation since the set destined for validation was not labeled.
# We chose to do the validation in this way (training by sending the train data and validation) to save time, make better use of the data, detect overfitting problems early and optimize the overall performance of the model.
# Split train and test
x_train, x_valid, y_train, y_valid = train_test_split(
x_train, y_train, test_size=0.1, random_state=13
)
print("train splitted:", x_train.shape[0])
print("val:", x_valid.shape[0])
print("test:", x_test.shape[0])
# # Building our Convnet Model
# Convnet architecture:
# Having the input images ready, these images go through a convolution network that extracts features (edges, textures, etc.) at first in a very superficial way and then, as it goes deeper into the network, much more complex features are extracted.
# These convolution layers are linked to a maxpooling layer that reduces complexity by limiting the length and width of the images. And so layer after layer of stacked convolutions with maxpooling will give us back an image that is smaller and smaller but deeper in its meaning and information.
# Next, a layer called Flatten is applied to flat the image input.
# ### General parameters:
# **Kernel (filters)**:
# We know that images are understood as arrays of pixels.
# The kernel is also a matrix (but smaller) that moves from the upper left corner to the lower right corner of the image, going step by step until it completes the entire image by doing a little mathematical operation called convolution. And in this tour, a mathematical multiplication operation is executed that obtains the data and patterns for each row and column of the image.
# The result of this convolution results in a new image with certain features highlighted. Thus, the objective of the filters is to detect features of the input images.
# In our model we started using 32 filters in the first layer that were later increased in the following layers.
# **Padding:**
# It is a margin that is added to the image so that when performing the convolution operation the resulting image does not reduce its size. 'same' is used so that it does not alter the characteristics of the original images.
# In our model we use padding = 'same'.
# **Maxpooling:**
# Reduces the size of the images resulting from the convolution thanks to a kernel that highlights only the most relevant features of the image.
# ### Parameters of the optimized models:
# **Regularizers:**
# We use the L2 regularization (which controls the magnitude of the weights)
# kernel_regularizer=regularizers.l2(w_regularizer) is used to apply L2 regularization to the weights of a convolutional layer on a CNN. It helps to avoid overfitting and improve the generalizability of the model by penalizing large weights.
# **Batch normalization:**
# It is a normalization within the hidden layers, throught the training, since the weights vary constantly, these values can be standardized within the network. This facilitates gradient descent and works in batches. The result is standardized data even within the network training.
# Which brings the resulting tensor to one dimension. And having only one dimension, the classification is done by stacking dense layers as it was done in typical neural networks.
#
filter_batch = 32
w_regulatizer = 1e-4
model = Sequential()
## conv 1
model.add(
Conv2D(
filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
input_shape=x_train.shape[1:],
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
## conv 2
model.add(
Conv2D(
filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
## conv 3
model.add(
Conv2D(
2 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
## conv 4
model.add(
Conv2D(
2 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
## conv 5
model.add(
Conv2D(
4 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
## conv 6
model.add(
Conv2D(
4 * filter_batch,
(3, 3),
padding="same",
kernel_regularizer=regularizers.l2(w_regulatizer),
)
)
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
## Clasificacion - Flatten
model.add(Flatten())
model.add(Dense(num_clases, activation="softmax"))
# summary of the created convnet
# model.summary()
# This is the summary of our first Convnet created plotted:
plot_model_file = "first_model.png"
plot_model(model, to_file=plot_model_file, show_shapes=True)
# As we can observe from the summary of our model, our first Convnet utilizes images that have already been resized to 150x150 and with each convolutional layer that passes, the size of the image is reduced by half, starting from the original size of 150 pixels and reaching 18 pixels as its limit.
# With that we can see the importance to understand the extent to which size can be reduced because if we exceed too many small pixels like 2 or 1 as the minimum resize, the prediction accuracy could be bad.
# ### Data Augmentation
# By using the ImageDataGenerator its parameters, we generate augmented images on the fly during the training.
# These augmented images can then be used to train our saved model, enhancing and helping to generalize improve performance on unseen images.
## Data augmentation
datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
)
# # Compiling our first model
# Using Adam optimizer and categorical_crossentropy lost function
model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
# **Callbacks:** early stopping and checkpoints
# Early Stopping:
# When a neural network has stopped optimizing the accuracy or the metric that we put in 'monitor', if this metric does not rise, this can decide to end the training when the network begins to diverge.
# Checkpoint:
# With this we can fully execute the training network throughout the epochs that we defined but in this file the weights of the neural network that had a better accuracy are saved. This ensures that the model is always the best.
checkpoint_model_name = "my_first_model.hdf5"
checkpoint = ModelCheckpoint(
checkpoint_model_name, verbose=1, save_best_only=True, monitor="val_accuracy"
)
BATCH_SIZE = 128
EPOCHS = 40
history_first_model = model.fit(
datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
callbacks=[checkpoint],
steps_per_epoch=x_train.shape[0] // BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=(x_valid, y_valid),
)
# Defining accuracy and loss plot funtion
def plot_loss_accuracy(hist, save_image_filename, len_epochs):
epochs = len_epochs
acc = hist.history["accuracy"]
val_acc = hist.history["val_accuracy"]
loss = hist.history["loss"]
val_loss = hist.history["val_loss"]
epochs_range = range(epochs)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, "r--", label="Training Accuracy")
plt.plot(epochs_range, val_acc, "b-", label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, "r--", label="Training Loss")
plt.plot(epochs_range, val_loss, "b-", label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.savefig(save_image_filename)
plt.show()
# call the function to plot the curves
plot_loss_accuracy(history_first_model, "loss_accuracy_40.png", EPOCHS)
# Saving history of our first model fit
save_first_history_model_file = "history_first_model.pkl"
save_pickle(history_first_model, save_first_history_model_file)
# Saving our first model
save_first_model_file = "first-model-40-epochs.pkl"
save_pickle(model, save_first_model_file)
# Loading our first model with history plot
saved_history_first_model = load_pickle("/kaggle/working/history_first_model.pkl")
plot_loss_accuracy(saved_history_first_model, "loss_accuracy_40_saved.png", EPOCHS)
# Loading our first model with history plot
saved_first_model = load_pickle("/kaggle/working/first-model-40-epochs.pkl")
# # Visualizing intermediate activations Images
img_path = (
"/kaggle/input/intel-image-classification/seg_test/seg_test/glacier/20253.jpg"
)
img = image.load_img(img_path, target_size=IMAGE_SIZE)
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.0
plt.imshow(img_tensor[0])
plt.show()
print(img_tensor.shape)
# Loading our first model
# predicting images
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
# classes = my_pickle_model.predict_classes(images, batch_size=10)
predict_x = saved_first_model.predict(images)
classes_x = np.argmax(predict_x, axis=1)
print("Predicted class is:", classes_x)
# ### Instantiating a model from an input tensor and a list of output tensors
# Extracts the outputs of the top 12 layers
layer_outputs = [layer.output for layer in saved_first_model.layers[:12]]
# Creates a model that will return these outputs, given the model input
activation_model = Model(inputs=saved_first_model.input, outputs=layer_outputs)
# Returns a list of five Numpy arrays: one array per layer activation
activations = activation_model.predict(img_tensor)
first_layer_activation = activations[0]
print(first_layer_activation.shape)
plt.matshow(first_layer_activation[0, :, :, 4], cmap="viridis")
# ### Visualizing every channel in every intermediate activation
layer_names = []
for layer in saved_first_model.layers[:12]:
layer_names.append(
layer.name
) # Names of the layers, so you can have them as part of your plot
images_per_row = 16
for layer_name, layer_activation in zip(
layer_names, activations
): # Displays the feature maps
n_features = layer_activation.shape[-1] # Number of features in the feature map
size = layer_activation.shape[
1
] # The feature map has shape (1, size, size, n_features).
n_cols = (
n_features // images_per_row
) # Tiles the activation channels in this matrix
display_grid = np.zeros((size * n_cols, images_per_row * size))
for col in range(n_cols): # Tiles each filter into a big horizontal grid
for row in range(images_per_row):
channel_image = layer_activation[0, :, :, col * images_per_row + row]
channel_image -= (
channel_image.mean()
) # Post-processes the feature to make it visually palatable
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255).astype("uint8")
display_grid[
col * size : (col + 1) * size, # Displays the grid
row * size : (row + 1) * size,
] = channel_image
scale = 1.0 / size
plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect="auto", cmap="viridis")
| false | 0 | 5,375 | 0 | 5,576 | 5,375 |
||
129454168
|
# # **Análise Exploratória de Dados de Logística | Python**
# 
# **Neste projeto, será realizada uma Análise Exploratória de Dados (EDA), com o intuito de explorar a relação entre os hubs (locais estratégicos para a redistribuição de mercadorias) da empresa Loggi e seus pontos de entregas localizados no Distrito Federal (DF).**
# ---
# ## **Índice**
# 1. Contexto
# 1.1. O Projeto
# 1.2. A Empresa
# 1.3. Os Dados
# 2. Pacotes e bibliotecas
# 3. Exploração dos Dados
#
# 3.1. Coleta dos Dados
# 3.2. Wrangling
# 3.3. Geocodificação
# 3.4. Qualidade
# 4. Visualização
# 5. Insight
# ---
# # **1. Contexto**
# ## **1.1. O Projeto**
# Esse projeto consiste em analisar um conjunto de dados que contém as entregas da empresa Loggi na região de Brasilia.
# Verificaremos a relação entre a distância dos hubs e suas entregas e também a capacidade dos veículos.
# Após a análise, iremos discutir se e como essas relações podem ser otimizadas.
# ## **1.2. A Empresa**
# A Loggi é uma das maiores transportadoras privadas do país e uma empresa brasileira de logística que atua principalmente no setor de e-commerce.
# ## **1.3. Os Dados**
# Os dados são sintetizados de fontes públicas (IBGE, IPEA, etc.) e são representativos dos desafios que a startup enfrenta no dia a dia, especialmente com relação a sua escala.
# # **2\. Pacotes e bibliotecas**
# * import json - Codifica e decodifica dados no formato JSON
#
# * import pandas as pd - Manipula e análisa dados tabulares
# * import geopy - Geocodificação em Python
# * from geopy.geocoders import Nominatim - Converte endereços em coordenadas geográficas e vice-versa
# * from geopy.extra.rate_limiter import RateLimiter - Limita as taxas de chamada de uma função
#
# * import numpy as np - Fornece suporte para operações com matrizes e funções matemáticas
#
# * import geopandas - Adiciona funcionalidades geoespaciais ao Panda
#
# * import matplotlib.pyplot as plt - Gera gráficos
#
# * import seaborn as sns - Gera gráficos
#
#
# # **3. Exploração dos Dados**
# ## **3.1. Coleta dos Dados**
# **Baixar os dados de um arquivo JSON:**
# **Carregar os dados do arquivo em um dicionário Python:**
import json
import pandas as pd
with open("deliveries.json", mode="r", encoding="utf8") as file:
data = json.load(file)
# **Conhecendo alguns dos dados:**
# O Número de linhas
len(data)
# O nome das colunas
nomecol = data[0]
print(nomecol.keys())
# ## **3.2. Wrangling**
# **Criar um dataframe Pandas para facilitar a visualização e manipulação:**
#
entregas_df = pd.DataFrame(data)
entregas_df.head()
# **Normalizando a coluna origin**
# A coluna origin tem dados aninhados, vamos normalizar:
# 1. Em um novo dataframe, transforrmar cada chave da coluna origin em novas colunas *(normalize)*
# 2. Juntar o novo dataframe com as novas colunas ao dataframe principal
# 3. Remover a coluna origin
# 4. Reordenar as colunas
hub_origem_df = pd.json_normalize(entregas_df["origin"])
entregas_df = pd.merge(
left=entregas_df,
right=hub_origem_df,
how="inner",
left_index=True,
right_index=True,
)
entregas_df = entregas_df.drop("origin", axis=1)
entregas_df = entregas_df[
["name", "region", "lng", "lat", "vehicle_capacity", "deliveries"]
]
entregas_df.head()
#
# **Normalizando a coluna deliveries**
# A coluna deliveries também tem dados aninhados, vamos normalizar:
# 1. Em um novo dataframe, transformar cada elemento da lista contida na coluna em uma linha *(explode)*.
# 2. Criar um dataframe para cada coluna e depois concatenar os dataframes, ou seja combinar os 3 dataframes em um novo dataframe. (A coluna ID não é relevante para as análises desse projeto)
# 5. Remover a coluna deliveries
# 6. Combinar o dataframe obtido com o dataframe principal
#
entregas_exploded_df = entregas_df[["deliveries"]].explode("deliveries")
entregas_normalized_df = pd.concat(
[
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(lambda record: record["size"])
).rename(columns={"deliveries": "delivery_size"}),
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(
lambda record: record["point"]["lng"]
)
).rename(columns={"deliveries": "delivery_lng"}),
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(
lambda record: record["point"]["lat"]
)
).rename(columns={"deliveries": "delivery_lat"}),
],
axis=1,
)
entregas_df = entregas_df.drop("deliveries", axis=1)
entregas_df = pd.merge(
left=entregas_df,
right=entregas_normalized_df,
how="right",
left_index=True,
right_index=True,
)
entregas_df.reset_index(inplace=True, drop=True)
entregas_df.head()
# **Vamos verificar alguns dados relacionados ao nosso dataframe:**
entregas_df.info()
# **Verificar se há Dados Faltantes**
entregas_df.isna().any()
# **Não há dados faltantes**
# ## **3.3. Geocodificação**
# A Geocodificação é um processo que transforma uma localização descrita em texto (endereço) em sua respectiva coordenada geográfica (latitude/longitude). Há também a Geocodificação reversa que faz o oposto e é ela que vamos empregar aqui. Utilizaremos um serviço gratuito de geocodificação através do pacote Geopy, chamado Nominatim.
# ### **3.3.1. Geocodificação Reversa dos Hubs**
# Processaremos as coordenadas geográficas para ter informações textuais do endereço através da Geocodificação reversa dos hubs.
# Para isso vamos extrair os dados de localização dos hubs
# aplicando a geocodificação nas coordenadas de cada região e extrair informações de cidade e bairro
# Extrair os dados das colunas region, lat e lnt para um novo dataframe
hub_df = entregas_df[["region", "lat", "lng"]]
hub_df = hub_df.drop_duplicates().sort_values(by="region").reset_index(drop=True)
hub_df.head()
import geopy
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
geolocator = Nominatim(user_agent="ebac_geocoder")
geocoder = RateLimiter(geolocator.reverse, min_delay_seconds=1)
# Criar a coluna coordinates com os dados da latitude e longitude dos hubs do tipo string
# Criar a coluna geodata que irá receber os dados da coluna coordinates com aplicação da função geocoder
hub_df["coordinates"] = hub_df["lat"].astype(str) + ", " + hub_df["lng"].astype(str)
hub_df["geodata"] = hub_df["coordinates"].apply(geocoder)
hub_df.head()
# Normalizar a coluna geodata
hub_geodata_df = pd.json_normalize(hub_df["geodata"].apply(lambda data: data.raw))
hub_geodata_df.head()
import numpy as np
# Extrair das colunas de interesse geradas
# Renomear as colunas
# Armazenar na coluna "hub_city" os dados da "hub_city",
# se não existirem, armazenar os dados da "hub_town"
# Armazenar na coluna "hub_suburb" os dados do "hub_suburb",
# se não existirem, armazenar os dados do "hub_city"
# Remove a coluna "hub_town"
hub_geodata_df = hub_geodata_df[["address.town", "address.suburb", "address.city"]]
hub_geodata_df.rename(
columns={
"address.town": "hub_town",
"address.suburb": "hub_suburb",
"address.city": "hub_city",
},
inplace=True,
)
hub_geodata_df["hub_city"] = np.where(
hub_geodata_df["hub_city"].notna(),
hub_geodata_df["hub_city"],
hub_geodata_df["hub_town"],
)
hub_geodata_df["hub_suburb"] = np.where(
hub_geodata_df["hub_suburb"].notna(),
hub_geodata_df["hub_suburb"],
hub_geodata_df["hub_city"],
)
hub_geodata_df = hub_geodata_df.drop("hub_town", axis=1)
hub_geodata_df.head()
# Combinar o dataframe "hub_geodata_df" (que contem cidades e bairros)
# com o dataframe "hub_df" (que contem as regioes)
# Extrair os dados das colunas: region, hub_suburb e hub_city
# Combinar o dataframe principal "entregas_df" com o novo dataframe "hub_df"
# Reorganizar as colunas
hub_df = pd.merge(left=hub_df, right=hub_geodata_df, left_index=True, right_index=True)
hub_df = hub_df[["region", "hub_suburb", "hub_city"]]
entregas_df = pd.merge(left=entregas_df, right=hub_df, how="inner", on="region")
entregas_df = entregas_df[
[
"name",
"region",
"lng",
"lat",
"hub_city",
"hub_suburb",
"vehicle_capacity",
"delivery_size",
"delivery_lng",
"delivery_lat",
]
]
entregas_df.head()
# ### **3.3.2 Geocodificação Reversa das Entregas**
# Como as entregas possuem mais de 600.000 localizações, vamos baixar um arquivo que já contém os dados geocodificados, extrair esse arquivo para um dataframe e combinar as colunas "delivery_city" e "delivery_suburb" com nosso arquivo principal.
# Download dos dados de geolocalização das entregas
# Carregar o arquivo baixado
deliveries_geodata_df = pd.read_csv("deliveries-geodata.csv")
# Combinar com o arquivo principal e extrair as colunas "delivery_city" e "delivery_suburb"
deliveries_df = pd.merge(
left=entregas_df,
right=deliveries_geodata_df[["delivery_city", "delivery_suburb"]],
how="inner",
left_index=True,
right_index=True,
)
deliveries_df.head()
# ## **3.4 - Qualidade**
# **Vamos observar alguns dados e verificar a qualidade do nosso material**
# Verificar as informações do dataframe
deliveries_df.info()
# Verificar dados nulos
deliveries_df.isna().any()
# **Verificando as entregas relacionadas a cidade e ao bairro de Brasília**
# Verificar a porcentagem de valores nulos nas entregas da cidade
100 * (deliveries_df["delivery_city"].isna().sum() / len(deliveries_df))
# Verificar a porcentagem de valores nulos nas entregas dos bairros
100 * (deliveries_df["delivery_suburb"].isna().sum() / len(deliveries_df))
# Verificar as entregas nas cidades de Brasilia
prop_df = deliveries_df[["delivery_city"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)
# Verificar as entregas nos bairros de Brasilia
prop_df = deliveries_df[["delivery_suburb"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)
# # **4. Visualização**
# **Instalação e importação do Geopandas**
# O GeopPandas adiciona funcionalidades geoespaciais ao pacote Python Pandas, que irá nos ajudar a visualizar as coordenadas dos hubs e das entregas no mapa do Distrito Federal, segmentados pela região dos hubs.
#
# Instalar o pacote geopandas
import geopandas
# **Vamos baixar os dados do mapa do Distrito Federal do site oficial do IBGE e gerar um dataframe**
mapa = geopandas.read_file("distrito-federal.shp")
mapa = mapa.loc[[0]]
mapa.head()
# **Criar um dataframe para os hubs com informações de geolocalização**
hub_df = (
deliveries_df[["region", "lng", "lat"]].drop_duplicates().reset_index(drop=True)
)
geo_hub_df = geopandas.GeoDataFrame(
hub_df, geometry=geopandas.points_from_xy(hub_df["lng"], hub_df["lat"])
)
geo_hub_df.head()
# **Criar um dataframe para as entregas com informações de geolocalização**
# Criar o dataframe das entregas
geo_deliveries_df = geopandas.GeoDataFrame(
deliveries_df,
geometry=geopandas.points_from_xy(
deliveries_df["delivery_lng"], deliveries_df["delivery_lat"]
),
)
geo_deliveries_df.head()
# ## **4.1. Mapa dos hubs e das entregas**
import matplotlib.pyplot as plt
# cria o plot vazio
fig, ax = plt.subplots(figsize=(25 / 2.54, 25 / 2.54))
# plot mapa do distrito federal
mapa.plot(ax=ax, alpha=0.4, color="lightgrey")
# plot das entregas
geo_deliveries_df.query("region == 'df-0'").plot(
ax=ax, markersize=1, color="sandybrown", label="df-0"
)
geo_deliveries_df.query("region == 'df-1'").plot(
ax=ax, markersize=1, color="darkred", label="df-1"
)
geo_deliveries_df.query("region == 'df-2'").plot(
ax=ax, markersize=1, color="firebrick", label="df-2"
)
# plot dos hubs
geo_hub_df.plot(ax=ax, markersize=30, marker="x", color="black", label="hub")
# plot da legenda
plt.title("Entregas no Distrito Federal por Região", fontdict={"fontsize": 14})
lgnd = plt.legend(prop={"size": 14})
for handle in lgnd.legendHandles:
handle.set_sizes([50])
# De acordo o mapa, observamos que os três hubs parecem estar bem localizados em relação ao trajeto das entregas. O hub da região central (df-1) possui grande parte das entregas concentradas ao seu redor. Já região df-2 as entregas começam e se distanciar. E na região df-0 talvez por haver uma menor concentração de pessoas, exista o maior espaçamento entre os pontos de entregas e uma maior distância do hub dessa região.
# ## **4.2. Gráfico de entregas por região**
# Construiremos um gráfico de barras com o percentual de entregas por região, mas antes, extrairemos as colunas de interesse para um novo dataframe, contaremos quantas vezes as regiões aparecem e através do atributo Normalize faremos uma contagem relativa de proporção que resultará em uma porcentagem.
# Extrair as colunas de regiao e de capacidade do veiculo
# Contar o número de ocorrências de cada combinação única de "region" e "vehicle_capacity", criar nova coluna
# e normalizar os resultados para que eles representem porcentagens em vez de contagens absolutas.
data = pd.DataFrame(
deliveries_df[["region", "vehicle_capacity"]].value_counts(normalize=True)
).reset_index()
# Renomear a nova coluna chamada de 0, para "region_percent"
data.rename(columns={0: "region_percent"}, inplace=True)
data.head()
# Visualizar o gráfico através do pacote seaborn
import seaborn as sns
with sns.axes_style("whitegrid"):
grafico = sns.barplot(
data=data, x="region", y="region_percent", errorbar=None, palette="rocket"
)
grafico.set(
title="Proporção de entregas por região", xlabel="Região", ylabel="Proporção"
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/454/129454168.ipynb
| null | null |
[{"Id": 129454168, "ScriptId": 38491909, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13725001, "CreationDate": "05/14/2023 01:09:15", "VersionNumber": 1.0, "Title": "An\u00e1lise Explorat\u00f3ria de Dados de Log\u00edstica", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 364.0, "LinesInsertedFromPrevious": 364.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
# # **Análise Exploratória de Dados de Logística | Python**
# 
# **Neste projeto, será realizada uma Análise Exploratória de Dados (EDA), com o intuito de explorar a relação entre os hubs (locais estratégicos para a redistribuição de mercadorias) da empresa Loggi e seus pontos de entregas localizados no Distrito Federal (DF).**
# ---
# ## **Índice**
# 1. Contexto
# 1.1. O Projeto
# 1.2. A Empresa
# 1.3. Os Dados
# 2. Pacotes e bibliotecas
# 3. Exploração dos Dados
#
# 3.1. Coleta dos Dados
# 3.2. Wrangling
# 3.3. Geocodificação
# 3.4. Qualidade
# 4. Visualização
# 5. Insight
# ---
# # **1. Contexto**
# ## **1.1. O Projeto**
# Esse projeto consiste em analisar um conjunto de dados que contém as entregas da empresa Loggi na região de Brasilia.
# Verificaremos a relação entre a distância dos hubs e suas entregas e também a capacidade dos veículos.
# Após a análise, iremos discutir se e como essas relações podem ser otimizadas.
# ## **1.2. A Empresa**
# A Loggi é uma das maiores transportadoras privadas do país e uma empresa brasileira de logística que atua principalmente no setor de e-commerce.
# ## **1.3. Os Dados**
# Os dados são sintetizados de fontes públicas (IBGE, IPEA, etc.) e são representativos dos desafios que a startup enfrenta no dia a dia, especialmente com relação a sua escala.
# # **2\. Pacotes e bibliotecas**
# * import json - Codifica e decodifica dados no formato JSON
#
# * import pandas as pd - Manipula e análisa dados tabulares
# * import geopy - Geocodificação em Python
# * from geopy.geocoders import Nominatim - Converte endereços em coordenadas geográficas e vice-versa
# * from geopy.extra.rate_limiter import RateLimiter - Limita as taxas de chamada de uma função
#
# * import numpy as np - Fornece suporte para operações com matrizes e funções matemáticas
#
# * import geopandas - Adiciona funcionalidades geoespaciais ao Panda
#
# * import matplotlib.pyplot as plt - Gera gráficos
#
# * import seaborn as sns - Gera gráficos
#
#
# # **3. Exploração dos Dados**
# ## **3.1. Coleta dos Dados**
# **Baixar os dados de um arquivo JSON:**
# **Carregar os dados do arquivo em um dicionário Python:**
import json
import pandas as pd
with open("deliveries.json", mode="r", encoding="utf8") as file:
data = json.load(file)
# **Conhecendo alguns dos dados:**
# O Número de linhas
len(data)
# O nome das colunas
nomecol = data[0]
print(nomecol.keys())
# ## **3.2. Wrangling**
# **Criar um dataframe Pandas para facilitar a visualização e manipulação:**
#
entregas_df = pd.DataFrame(data)
entregas_df.head()
# **Normalizando a coluna origin**
# A coluna origin tem dados aninhados, vamos normalizar:
# 1. Em um novo dataframe, transforrmar cada chave da coluna origin em novas colunas *(normalize)*
# 2. Juntar o novo dataframe com as novas colunas ao dataframe principal
# 3. Remover a coluna origin
# 4. Reordenar as colunas
hub_origem_df = pd.json_normalize(entregas_df["origin"])
entregas_df = pd.merge(
left=entregas_df,
right=hub_origem_df,
how="inner",
left_index=True,
right_index=True,
)
entregas_df = entregas_df.drop("origin", axis=1)
entregas_df = entregas_df[
["name", "region", "lng", "lat", "vehicle_capacity", "deliveries"]
]
entregas_df.head()
#
# **Normalizando a coluna deliveries**
# A coluna deliveries também tem dados aninhados, vamos normalizar:
# 1. Em um novo dataframe, transformar cada elemento da lista contida na coluna em uma linha *(explode)*.
# 2. Criar um dataframe para cada coluna e depois concatenar os dataframes, ou seja combinar os 3 dataframes em um novo dataframe. (A coluna ID não é relevante para as análises desse projeto)
# 5. Remover a coluna deliveries
# 6. Combinar o dataframe obtido com o dataframe principal
#
entregas_exploded_df = entregas_df[["deliveries"]].explode("deliveries")
entregas_normalized_df = pd.concat(
[
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(lambda record: record["size"])
).rename(columns={"deliveries": "delivery_size"}),
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(
lambda record: record["point"]["lng"]
)
).rename(columns={"deliveries": "delivery_lng"}),
pd.DataFrame(
entregas_exploded_df["deliveries"].apply(
lambda record: record["point"]["lat"]
)
).rename(columns={"deliveries": "delivery_lat"}),
],
axis=1,
)
entregas_df = entregas_df.drop("deliveries", axis=1)
entregas_df = pd.merge(
left=entregas_df,
right=entregas_normalized_df,
how="right",
left_index=True,
right_index=True,
)
entregas_df.reset_index(inplace=True, drop=True)
entregas_df.head()
# **Vamos verificar alguns dados relacionados ao nosso dataframe:**
entregas_df.info()
# **Verificar se há Dados Faltantes**
entregas_df.isna().any()
# **Não há dados faltantes**
# ## **3.3. Geocodificação**
# A Geocodificação é um processo que transforma uma localização descrita em texto (endereço) em sua respectiva coordenada geográfica (latitude/longitude). Há também a Geocodificação reversa que faz o oposto e é ela que vamos empregar aqui. Utilizaremos um serviço gratuito de geocodificação através do pacote Geopy, chamado Nominatim.
# ### **3.3.1. Geocodificação Reversa dos Hubs**
# Processaremos as coordenadas geográficas para ter informações textuais do endereço através da Geocodificação reversa dos hubs.
# Para isso vamos extrair os dados de localização dos hubs
# aplicando a geocodificação nas coordenadas de cada região e extrair informações de cidade e bairro
# Extrair os dados das colunas region, lat e lnt para um novo dataframe
hub_df = entregas_df[["region", "lat", "lng"]]
hub_df = hub_df.drop_duplicates().sort_values(by="region").reset_index(drop=True)
hub_df.head()
import geopy
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
geolocator = Nominatim(user_agent="ebac_geocoder")
geocoder = RateLimiter(geolocator.reverse, min_delay_seconds=1)
# Criar a coluna coordinates com os dados da latitude e longitude dos hubs do tipo string
# Criar a coluna geodata que irá receber os dados da coluna coordinates com aplicação da função geocoder
hub_df["coordinates"] = hub_df["lat"].astype(str) + ", " + hub_df["lng"].astype(str)
hub_df["geodata"] = hub_df["coordinates"].apply(geocoder)
hub_df.head()
# Normalizar a coluna geodata
hub_geodata_df = pd.json_normalize(hub_df["geodata"].apply(lambda data: data.raw))
hub_geodata_df.head()
import numpy as np
# Extrair das colunas de interesse geradas
# Renomear as colunas
# Armazenar na coluna "hub_city" os dados da "hub_city",
# se não existirem, armazenar os dados da "hub_town"
# Armazenar na coluna "hub_suburb" os dados do "hub_suburb",
# se não existirem, armazenar os dados do "hub_city"
# Remove a coluna "hub_town"
hub_geodata_df = hub_geodata_df[["address.town", "address.suburb", "address.city"]]
hub_geodata_df.rename(
columns={
"address.town": "hub_town",
"address.suburb": "hub_suburb",
"address.city": "hub_city",
},
inplace=True,
)
hub_geodata_df["hub_city"] = np.where(
hub_geodata_df["hub_city"].notna(),
hub_geodata_df["hub_city"],
hub_geodata_df["hub_town"],
)
hub_geodata_df["hub_suburb"] = np.where(
hub_geodata_df["hub_suburb"].notna(),
hub_geodata_df["hub_suburb"],
hub_geodata_df["hub_city"],
)
hub_geodata_df = hub_geodata_df.drop("hub_town", axis=1)
hub_geodata_df.head()
# Combinar o dataframe "hub_geodata_df" (que contem cidades e bairros)
# com o dataframe "hub_df" (que contem as regioes)
# Extrair os dados das colunas: region, hub_suburb e hub_city
# Combinar o dataframe principal "entregas_df" com o novo dataframe "hub_df"
# Reorganizar as colunas
hub_df = pd.merge(left=hub_df, right=hub_geodata_df, left_index=True, right_index=True)
hub_df = hub_df[["region", "hub_suburb", "hub_city"]]
entregas_df = pd.merge(left=entregas_df, right=hub_df, how="inner", on="region")
entregas_df = entregas_df[
[
"name",
"region",
"lng",
"lat",
"hub_city",
"hub_suburb",
"vehicle_capacity",
"delivery_size",
"delivery_lng",
"delivery_lat",
]
]
entregas_df.head()
# ### **3.3.2 Geocodificação Reversa das Entregas**
# Como as entregas possuem mais de 600.000 localizações, vamos baixar um arquivo que já contém os dados geocodificados, extrair esse arquivo para um dataframe e combinar as colunas "delivery_city" e "delivery_suburb" com nosso arquivo principal.
# Download dos dados de geolocalização das entregas
# Carregar o arquivo baixado
deliveries_geodata_df = pd.read_csv("deliveries-geodata.csv")
# Combinar com o arquivo principal e extrair as colunas "delivery_city" e "delivery_suburb"
deliveries_df = pd.merge(
left=entregas_df,
right=deliveries_geodata_df[["delivery_city", "delivery_suburb"]],
how="inner",
left_index=True,
right_index=True,
)
deliveries_df.head()
# ## **3.4 - Qualidade**
# **Vamos observar alguns dados e verificar a qualidade do nosso material**
# Verificar as informações do dataframe
deliveries_df.info()
# Verificar dados nulos
deliveries_df.isna().any()
# **Verificando as entregas relacionadas a cidade e ao bairro de Brasília**
# Verificar a porcentagem de valores nulos nas entregas da cidade
100 * (deliveries_df["delivery_city"].isna().sum() / len(deliveries_df))
# Verificar a porcentagem de valores nulos nas entregas dos bairros
100 * (deliveries_df["delivery_suburb"].isna().sum() / len(deliveries_df))
# Verificar as entregas nas cidades de Brasilia
prop_df = deliveries_df[["delivery_city"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)
# Verificar as entregas nos bairros de Brasilia
prop_df = deliveries_df[["delivery_suburb"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)
# # **4. Visualização**
# **Instalação e importação do Geopandas**
# O GeopPandas adiciona funcionalidades geoespaciais ao pacote Python Pandas, que irá nos ajudar a visualizar as coordenadas dos hubs e das entregas no mapa do Distrito Federal, segmentados pela região dos hubs.
#
# Instalar o pacote geopandas
import geopandas
# **Vamos baixar os dados do mapa do Distrito Federal do site oficial do IBGE e gerar um dataframe**
mapa = geopandas.read_file("distrito-federal.shp")
mapa = mapa.loc[[0]]
mapa.head()
# **Criar um dataframe para os hubs com informações de geolocalização**
hub_df = (
deliveries_df[["region", "lng", "lat"]].drop_duplicates().reset_index(drop=True)
)
geo_hub_df = geopandas.GeoDataFrame(
hub_df, geometry=geopandas.points_from_xy(hub_df["lng"], hub_df["lat"])
)
geo_hub_df.head()
# **Criar um dataframe para as entregas com informações de geolocalização**
# Criar o dataframe das entregas
geo_deliveries_df = geopandas.GeoDataFrame(
deliveries_df,
geometry=geopandas.points_from_xy(
deliveries_df["delivery_lng"], deliveries_df["delivery_lat"]
),
)
geo_deliveries_df.head()
# ## **4.1. Mapa dos hubs e das entregas**
import matplotlib.pyplot as plt
# cria o plot vazio
fig, ax = plt.subplots(figsize=(25 / 2.54, 25 / 2.54))
# plot mapa do distrito federal
mapa.plot(ax=ax, alpha=0.4, color="lightgrey")
# plot das entregas
geo_deliveries_df.query("region == 'df-0'").plot(
ax=ax, markersize=1, color="sandybrown", label="df-0"
)
geo_deliveries_df.query("region == 'df-1'").plot(
ax=ax, markersize=1, color="darkred", label="df-1"
)
geo_deliveries_df.query("region == 'df-2'").plot(
ax=ax, markersize=1, color="firebrick", label="df-2"
)
# plot dos hubs
geo_hub_df.plot(ax=ax, markersize=30, marker="x", color="black", label="hub")
# plot da legenda
plt.title("Entregas no Distrito Federal por Região", fontdict={"fontsize": 14})
lgnd = plt.legend(prop={"size": 14})
for handle in lgnd.legendHandles:
handle.set_sizes([50])
# De acordo o mapa, observamos que os três hubs parecem estar bem localizados em relação ao trajeto das entregas. O hub da região central (df-1) possui grande parte das entregas concentradas ao seu redor. Já região df-2 as entregas começam e se distanciar. E na região df-0 talvez por haver uma menor concentração de pessoas, exista o maior espaçamento entre os pontos de entregas e uma maior distância do hub dessa região.
# ## **4.2. Gráfico de entregas por região**
# Construiremos um gráfico de barras com o percentual de entregas por região, mas antes, extrairemos as colunas de interesse para um novo dataframe, contaremos quantas vezes as regiões aparecem e através do atributo Normalize faremos uma contagem relativa de proporção que resultará em uma porcentagem.
# Extrair as colunas de regiao e de capacidade do veiculo
# Contar o número de ocorrências de cada combinação única de "region" e "vehicle_capacity", criar nova coluna
# e normalizar os resultados para que eles representem porcentagens em vez de contagens absolutas.
data = pd.DataFrame(
deliveries_df[["region", "vehicle_capacity"]].value_counts(normalize=True)
).reset_index()
# Renomear a nova coluna chamada de 0, para "region_percent"
data.rename(columns={0: "region_percent"}, inplace=True)
data.head()
# Visualizar o gráfico através do pacote seaborn
import seaborn as sns
with sns.axes_style("whitegrid"):
grafico = sns.barplot(
data=data, x="region", y="region_percent", errorbar=None, palette="rocket"
)
grafico.set(
title="Proporção de entregas por região", xlabel="Região", ylabel="Proporção"
)
| false | 0 | 203,355 | 1 | 203,355 | 203,355 |
||
129423198
|
<jupyter_start><jupyter_text>Random Sample of NIH Chest X-ray Dataset
# NIH Chest X-ray Dataset Sample
---
### National Institutes of Health Chest X-Ray Dataset
Chest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real-world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack of resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.
This NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: "ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases." (*Wang et al.*)
[Link to paper][30]
[1]: https://openi.nlm.nih.gov/
<br>
### File contents - This is a random sample (5%) of the full dataset:
- **sample.zip**: Contains 5,606 images with size 1024 x 1024
- **sample_labels.csv**: Class labels and patient data for the entire dataset
- Image Index: File name
- Finding Labels: Disease type (Class label)
- Follow-up #
- Patient ID
- Patient Age
- Patient Gender
- View Position: X-ray orientation
- OriginalImageWidth
- OriginalImageHeight
- OriginalImagePixelSpacing_x
- OriginalImagePixelSpacing_y
<br>
### Class descriptions
There are 15 classes (14 diseases, and one for "No Finding") in the full dataset, but since this is a drastically reduced version of the full dataset, some of the classes are sparse, with the majority of images labeled as "No Finding".
- Hernia - 13 images
- Pneumonia - 62 images
- Fibrosis - 84 images
- Edema - 118 images
- Emphysema - 127 images
- Cardiomegaly - 141 images
- Pleural_Thickening - 176 images
- Consolidation - 226 images
- Pneumothorax - 271 images
- Mass - 284 images
- Nodule - 313 images
- Atelectasis - 508 images
- Effusion - 644 images
- Infiltration - 967 images
- No Finding - 3044 images
<br>
### Full Dataset Content
[The full dataset can be found here][3]. There are 12 zip files in total, ranging from ~2 GB to 4 GB in size.
[3]: https://www.kaggle.com/nih-chest-xrays/data
<br>
### Data limitations:
1. The image labels are NLP-extracted, so there could be some erroneous labels, but the NLP labeling accuracy is estimated to be >90%.
2. Very limited number of disease region bounding boxes (see BBox_list_2017.csv)
3. Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their “updated” image labels and/or new bounding boxes in their own studies later, perhaps through manual annotation
<br>
### Modifications to original data
- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform
- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory
<br>
### Citations
- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]
- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]
- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]
<br>
Kaggle dataset identifier: sample
<jupyter_script># # Hi, Welcome to my Kernel
# Outline
# - EDA
# - Avoid Data Imbalance using Weighted Loss
# - Loading Dataset and Applying Transforms
# - Define Pre-trained Model
# - Train Model
# - Each label Accuracy
# - Plot results
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from operator import itemgetter
from collections import OrderedDict
import cv2
from torch.nn import functional as F
# from pytorch_grad_cam import GradCAM
from PIL import Image
import seaborn as sns
import matplotlib.pyplot as plt
import torch
from torch import optim, nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader, random_split, ConcatDataset
from torchvision.utils import make_grid
# pd.options.plotting.backend = "plotly"
pd.set_option("plotting.backend", "plotly")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from torch.utils.data import random_split
from torch.utils.data import Subset
print("Device: ", device)
# param_management
CSV_PATH = "../input/sample/sample_labels.csv"
IMG_DIR = "../input/sample/sample/sample/images/"
# # EDA
data = pd.read_csv(CSV_PATH)
data.head()
data["Patient Gender"].value_counts().plot.bar()
data["Patient Age"].apply(lambda x: int(x[1:3])).plot.hist()
data["Patient Age"].apply(lambda x: int(x[1:3])).plot.box()
data["View Position"].value_counts().plot.bar()
pathology_list = [
"Cardiomegaly",
"Emphysema",
"Effusion",
"Hernia",
"Nodule",
"Pneumothorax",
"Atelectasis",
"Pleural_Thickening",
"Mass",
"Edema",
"Consolidation",
"Infiltration",
"Fibrosis",
"Pneumonia",
]
for pathology in pathology_list:
data[pathology] = data["Finding Labels"].apply(lambda x: 1 if pathology in x else 0)
data["No Findings"] = data["Finding Labels"].apply(
lambda x: 1 if "No Finding" in x else 0
)
data = data.drop(list(data.iloc[:, 1:11].columns.values), axis=1)
data.iloc[:, 1:].sum().plot.barh()
data = data.drop(["No Findings"], axis=1)
data.iloc[:, 1:].sum().plot.barh()
data.iloc[:, 1:].mean().plot.barh()
# # Avoid Data Imbalance using Weighted Loss
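# In outline (this mirrors the `weighted_loss` implemented below): for each pathology $c$ the two
# binary cross-entropy terms are reweighted by the *opposite* class frequency,
# $L_c = -\,\text{freq}_{neg,c}\; y \log(\hat{y}) \;-\; \text{freq}_{pos,c}\,(1-y)\log(1-\hat{y})$,
# so that, averaged over the dataset, positives and negatives of a rare class contribute equally to the loss.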
def compute_class_freqs(labels):
labels = np.array(labels)
N = labels.shape[0]
positive_frequencies = np.sum(labels, axis=0) / N
negative_frequencies = 1 - positive_frequencies
return positive_frequencies, negative_frequencies
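# Small illustrative check (added for clarity, not from the original run): in a toy label matrix
# where 2 of 4 samples are positive for class 0 and 1 of 4 for class 1, the frequencies come out as
# [0.5, 0.25] positive and [0.5, 0.75] negative.
print(compute_class_freqs(np.array([[1, 0], [1, 1], [0, 0], [0, 0]])))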
freq_pos, freq_neg = compute_class_freqs(data.iloc[:, 1:])
df = pd.DataFrame({"Class": pathology_list, "Label": "Positive", "Value": freq_pos})
df = df.append(
[
{"Class": pathology_list[l], "Label": "Negative", "Value": v}
for l, v in enumerate(freq_neg)
],
ignore_index=True,
)
plt.xticks(rotation=90)
f = sns.barplot(x="Class", y="Value", hue="Label", data=df)
pos_weights = freq_neg
neg_weights = freq_pos
pos_contribution = freq_pos * pos_weights
neg_contribution = freq_neg * neg_weights
df = pd.DataFrame(
{"Class": pathology_list, "Label": "Positive", "Value": pos_contribution}
)
df = df.append(
[
{"Class": pathology_list[l], "Label": "Negative", "Value": v}
for l, v in enumerate(neg_contribution)
],
ignore_index=True,
)
plt.xticks(rotation=90)
f = sns.barplot(x="Class", y="Value", hue="Label", data=df)
def weighted_loss(pos_weights, neg_weights, y_pred, y_true, epsilon=1e-7):
loss = 0.0
for i in range(len(pos_weights)):
loss_pos = -1 * torch.mean(
pos_weights[i] * y_true[:, i] * torch.log(y_pred[:, i] + epsilon)
)
loss_neg = -1 * torch.mean(
neg_weights[i]
* (1 - y_true[:, i])
* torch.log((1 - y_pred[:, i]) + epsilon)
)
loss += loss_pos + loss_neg
return loss
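# Tiny sanity check (illustrative addition): with all weights set to 1 the function reduces to
# plain per-class binary cross-entropy summed over the classes.
_y_true = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
_y_pred = torch.tensor([[0.9, 0.2], [0.3, 0.8]])
print(weighted_loss([1.0, 1.0], [1.0, 1.0], _y_pred, _y_true))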
# # Loading Dataset and Applying Transforms
data_transform = T.Compose(
[
T.RandomRotation((-20, +20)),
T.Resize((224, 224)),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
class NIH_Dataset(Dataset):
def __init__(self, data, img_dir, transform=None):
self.data = data
self.img_dir = img_dir
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_file = self.img_dir + self.data.iloc[:, 0][idx]
img = Image.open(img_file).convert("RGB")
label = np.array(self.data.iloc[:, 1:].iloc[idx])
if self.transform:
img = self.transform(img)
return img, label
trainds = NIH_Dataset(data, img_dir=IMG_DIR, transform=data_transform)
labels = pathology_list
# Build one Subset per pathology: each subset holds only the images positive for that class.
subsets = []
for pathology in pathology_list:
    indices = data[data[pathology] == 1].index.tolist()
    subsets.append(Subset(trainds, indices))
for pathology, subset in zip(pathology_list, subsets):
    print(pathology, len(subset))
print(int(len(subsets[-1]) * 0.7))
print(int(len(subsets[-1]) * 0.3))
# Per-class train/valid/test sizes (unchanged from the original per-class splits; each triple sums
# to the number of images positive for that class).
split_sizes = [
    [99, 28, 14],  # Cardiomegaly (141)
    [89, 26, 12],  # Emphysema (127)
    [451, 129, 64],  # Effusion (644)
    [10, 2, 1],  # Hernia (13)
    [220, 62, 31],  # Nodule (313)
    [190, 54, 27],  # Pneumothorax (271)
    [356, 101, 51],  # Atelectasis (508)
    [123, 35, 18],  # Pleural_Thickening (176)
    [200, 56, 28],  # Mass (284)
    [83, 23, 12],  # Edema (118)
    [159, 45, 22],  # Consolidation (226)
    [677, 193, 97],  # Infiltration (967)
    [59, 17, 8],  # Fibrosis (84)
    [43, 12, 7],  # Pneumonia (62)
]
train_parts, valid_parts, test_parts = [], [], []
for subset, sizes in zip(subsets, split_sizes):
    tr, va, te = random_split(subset, sizes)
    train_parts.append(tr)
    valid_parts.append(va)
    test_parts.append(te)
trainset = ConcatDataset(train_parts)
validset = ConcatDataset(valid_parts)
testset = ConcatDataset(test_parts)
def deprocess(img):
img = img.permute(1, 2, 0)
img = img * torch.Tensor([0.229, 0.224, 0.225]) + torch.Tensor(
[0.485, 0.456, 0.406]
)
return img
image, label = trainds[0]
class_labels = list(np.where(label == 1)[0])
plt.imshow(deprocess(image))
plt.title(itemgetter(*class_labels)(pathology_list))
# # Split Dataset and create dataloaders
trainset, validset, testset = random_split(trainds, [5000, 303, 303])
print("Length of trainset : {}".format(len(trainset)))
print("Length of testset : {}".format(len(testset)))
print("Length of validset : {}".format(len(validset)))
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
validloader = DataLoader(validset, batch_size=32, shuffle=False)
testloader = DataLoader(testset, batch_size=32, shuffle=False)
# # Define Pre-trained Model
from torchvision import models
model = models.resnet50()
model.load_state_dict(
torch.load("/kaggle/input/pretrained-model-weights-pytorch/resnet50-19c8e357.pth")
)
model = torch.load("/kaggle/working/Res50_epoch10.pt")
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Sequential(nn.Linear(2048, 14), nn.Sigmoid())
model.to(device)
# # Train Model
optimizer = optim.Adam(model.parameters(), lr=Learning_Rate)
schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
epochs = 5
valid_loss_min = np.Inf
#
epochs = 100
Learning_Rate = 0.01
optimizer = optim.Adam(model.parameters(), lr=Learning_Rate)
schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
for i in range(epochs):
train_loss = 0.0
valid_loss = 0.0
train_acc = 0.0
valid_acc = 0.0
model.train()
for images, labels in tqdm(trainloader):
images = images.to(device)
labels = labels.to(device)
ps = model(images)
loss = weighted_loss(pos_weights, neg_weights, ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
avg_train_loss = train_loss / len(trainloader)
model.eval()
with torch.no_grad():
for images, labels in tqdm(validloader):
images = images.to(device)
labels = labels.to(device)
ps = model(images)
loss = weighted_loss(pos_weights, neg_weights, ps, labels)
valid_loss += loss.item()
avg_valid_loss = valid_loss / len(validloader)
schedular.step(avg_valid_loss)
if avg_valid_loss <= valid_loss_min:
print(
"Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format(
valid_loss_min, avg_valid_loss
)
)
torch.save(model, "Res50_epoch10.pt")
valid_loss_min = avg_valid_loss
print("Epoch : {} Train Loss : {:.6f} ".format(i + 1, avg_train_loss))
print("Epoch : {} Valid Loss : {:.6f} ".format(i + 1, avg_valid_loss))
# # Each Class Accuracy
def class_accuracy(dataloader, model):
per_class_accuracy = [0 for i in range(len(pathology_list))]
total = 0.0
with torch.no_grad():
for images, labels in dataloader:
ps = model(images.to(device))
labels = labels.to(device)
ps = (ps >= 0.5).float()
for i in range(ps.shape[1]):
x1 = ps[:, i : i + 1]
x2 = labels[:, i : i + 1]
per_class_accuracy[i] += int((x1 == x2).sum())
per_class_accuracy = [
(i / len(dataloader.dataset)) * 100.0 for i in per_class_accuracy
]
return per_class_accuracy
def get_acc_data(class_names, acc_list):
df = pd.DataFrame(list(zip(class_names, acc_list)), columns=["Labels", "Acc"])
return df
print("Train Dataset Accuracy Report")
acc_list = class_accuracy(trainloader, model)
get_acc_data(pathology_list, acc_list)
print("Test Dataset Accuracy Report")
acc_list = class_accuracy(testloader, model)
get_acc_data(pathology_list, acc_list)
print("Valid Dataset Accuracy Report")
acc_list = class_accuracy(validloader, model)
get_acc_data(pathology_list, acc_list)
from sklearn.metrics import roc_auc_score, roc_curve
def get_roc_curve(labels, preds, class_names):
plt.figure(figsize=(15, 10))
plt.title("Receiver Operating Characteristic")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
for i in range(len(class_names)):
fpr, tpr, thresholds = roc_curve(labels[:, i], preds[:, i])
plt.plot(fpr, tpr, label=class_names[i])
plt.legend(loc="best")
plt.show()
def get_roc_auc_score(labels, preds):
roc_auc_scores = []
for i in range(len(pathology_list)):
roc_auc_scores.append(roc_auc_score(labels[:, i], preds[:, i]))
return roc_auc_scores
def get_roc_auc_data(class_names, roc_auc_scores):
df = pd.DataFrame(
list(zip(class_names, roc_auc_scores)), columns=["Labels", "ROC AUC Score"]
)
return df
def get_roc_data(labels, preds, class_names):
get_roc_curve(labels, preds, class_names)
roc_auc_scores = get_roc_auc_score(labels, preds)
return get_roc_auc_data(class_names, roc_auc_scores)
def get_roc_data_for_dataset(dataloader, model, class_names):
labels = []
preds = []
with torch.no_grad():
for images, labels_batch in dataloader:
labels_batch = labels_batch.numpy()
labels.append(labels_batch)
ps = model(images.to(device))
ps = ps.cpu().numpy()
preds.append(ps)
labels = np.concatenate(labels)
preds = np.concatenate(preds)
return get_roc_data(labels, preds, class_names)
print("Train Dataset ROC AUC Report")
get_roc_data_for_dataset(trainloader, model, pathology_list)
print("Test Dataset ROC AUC Report")
get_roc_data_for_dataset(testloader, model, pathology_list)
print("Valid Dataset ROC AUC Report")
get_roc_data_for_dataset(validloader, model, pathology_list)
# GradCAM
def get_gradcam(img, model, class_names, layer_name):
gradcam = GradCAM.from_config(
model_type="resnet", arch=model, layer_name=layer_name
)
img = np.float32(img) / 255
input = preprocess_image(img)
input = input.unsqueeze(0)
model.to(device)
model.eval()
input = input.to(device)
target_index = None
mask = gradcam(input, target_index)
heatmap, result = visualize_cam(mask, img)
return heatmap, result
def plot_heatmap(img, heatmap, result, class_names):
plt.figure(figsize=(10, 10))
ax = sns.heatmap(heatmap, cmap="jet")
ax2 = plt.axes([0, 0, 1, 1], frameon=False)
plt.axis("off")
plt.imshow(img)
plt.savefig("heatmap.png")
plt.show()
plt.figure(figsize=(10, 10))
plt.imshow(result)
plt.savefig("result.png")
plt.show()
def get_gradcam_for_dataset(dataloader, model, class_names, layer_name):
images, labels = next(iter(dataloader))
images = images.numpy()
labels = labels.numpy()
idx = np.random.randint(0, len(images))
img = images[idx]
img = np.transpose(img, (1, 2, 0))
heatmap, result = get_gradcam(img, model, class_names, layer_name)
plot_heatmap(img, heatmap, result, class_names)
get_gradcam_for_dataset(trainloader, model, pathology_list, "layer4")
get_gradcam_for_dataset(testloader, model, pathology_list, "layer4")
get_gradcam_for_dataset(validloader, model, pathology_list, "layer4")
# # Plot Results
def view_classify(img, ps, label):
class_name = pathology_list
classes = np.array(class_name)
ps = ps.cpu().data.numpy().squeeze()
img = deprocess(img)
class_labels = list(np.where(label == 1)[0])
if not class_labels:
title = "No Findings"
else:
title = itemgetter(*class_labels)(class_name)
fig, (ax1, ax2) = plt.subplots(figsize=(8, 12), ncols=2)
ax1.imshow(img)
ax1.set_title("Ground Truth : {}".format(title))
ax1.axis("off")
ax2.barh(classes, ps)
ax2.set_aspect(0.1)
ax2.set_yticks(classes)
ax2.set_yticklabels(classes)
ax2.set_title("Predicted Class")
ax2.set_xlim(0, 1.1)
plt.tight_layout()
return None
image, label = testset[33]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
image, label = trainset[999]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
image, label = validset[234]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423198.ipynb
|
sample
| null |
[{"Id": 129423198, "ScriptId": 38191677, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9915414, "CreationDate": "05/13/2023 16:57:29", "VersionNumber": 2.0, "Title": "Multi-label Chest X-Ray classification", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 582.0, "LinesInsertedFromPrevious": 289.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 293.0, "LinesInsertedFromFork": 289.0, "LinesDeletedFromFork": 38.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 293.0, "TotalVotes": 0}]
|
[{"Id": 185460457, "KernelVersionId": 129423198, "SourceDatasetVersionId": 7773}, {"Id": 185460458, "KernelVersionId": 129423198, "SourceDatasetVersionId": 791155}]
|
[{"Id": 7773, "DatasetId": 4667, "DatasourceVersionId": 7773, "CreatorUserId": 484516, "LicenseName": "CC0: Public Domain", "CreationDate": "11/23/2017 02:58:24", "VersionNumber": 4.0, "Title": "Random Sample of NIH Chest X-ray Dataset", "Slug": "sample", "Subtitle": "5,606 images and labels sampled from the NIH Chest X-ray Dataset", "Description": "# NIH Chest X-ray Dataset Sample\n\n---\n\n### National Institutes of Health Chest X-Ray Dataset\n\nChest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.\n\nThis NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: \"ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases.\" (*Wang et al.*)\n\n[Link to paper][30]\n\n[1]: https://openi.nlm.nih.gov/\n\n\n<br>\n### File contents - This is a random sample (5%) of the full dataset:\n\n- **sample.zip**: Contains 5,606 images with size 1024 x 1024\n\n- **sample_labels.csv**: Class labels and patient data for the entire dataset\n - Image Index: File name\n - Finding Labels: Disease type (Class label)\n - Follow-up # \n - Patient ID\n - Patient Age\n - Patient Gender\n - View Position: X-ray orientation\n - OriginalImageWidth\n - OriginalImageHeight\n - OriginalImagePixelSpacing_x\n - OriginalImagePixelSpacing_y\n \n\n\n\n\n<br>\n### Class descriptions\n\nThere are 15 classes (14 diseases, and one for \"No findings\") in the full dataset, but since this is drastically reduced version of the full dataset, some of the classes are sparse with the labeled as \"No findings\"\n\n- Hernia - 13 images\n- Pneumonia - 62 images\n- Fibrosis - 84 images\n- Edema - 118 images\n- Emphysema - 127 images\n- Cardiomegaly - 141 images\n- Pleural_Thickening - 176 images\n- Consolidation - 226 images\n- Pneumothorax - 271 images\n- Mass - 284 images\n- Nodule - 313 images\n- Atelectasis - 508 images\n- Effusion - 644 images\n- Infiltration - 967 images\n- No Finding - 3044 images\n\n<br>\n### Full Dataset Content\n\n[The full dataset can be found here][3]. There are 12 zip files in total and range from ~2 gb to 4 gb in size. \n\n\n[3]: https://www.kaggle.com/nih-chest-xrays/data\n\n\n<br>\n### Data limitations: \n\n1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%. \n2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv)\n3. 
Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their \u201cupdated\u201d image labels and/or new bounding boxes in their own studied later, maybe through manual annotation\n\n\n<br>\n### Modifications to original data\n\n- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform\n\n- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory\n\n<br>\n### Citations\n\n- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]\n\n- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]\n\n- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]\n\n<br>\n### Acknowledgements\n\nThis work was supported by the Intramural Research Program of the NClinical Center (clinicalcenter.nih.gov) and National Library of Medicine (www.nlm.nih.gov). \n\n\n [30]: https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community\n\n [31]: https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345", "VersionNotes": "Simplified the ZIP file", "TotalCompressedBytes": 2253119529.0, "TotalUncompressedBytes": 2253119529.0}]
|
[{"Id": 4667, "CreatorUserId": 484516, "OwnerUserId": NaN, "OwnerOrganizationId": 1146.0, "CurrentDatasetVersionId": 7773.0, "CurrentDatasourceVersionId": 7773.0, "ForumId": 10494, "Type": 2, "CreationDate": "11/15/2017 17:04:12", "LastActivityDate": "02/06/2018", "TotalViews": 90551, "TotalDownloads": 16454, "TotalVotes": 251, "TotalKernels": 47}]
| null |
# # Hi, Welcome to my Kernel
# Outline
# - EDA
# - Avoid Data Imbalance using Weighted Loss
# - Loading Dataset and Applying Transforms
# - Define Pre-trained Model
# - Train Model
# - Each label Accuracy
# - Plot results
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from operator import itemgetter
from collections import OrderedDict
import cv2
from torch.nn import functional as F
# from pytorch_grad_cam import GradCAM
from PIL import Image
import seaborn as sns
import matplotlib.pyplot as plt
import torch
from torch import optim, nn
import torch.nn.functional as F
from torchvision import transforms as T
from torch.utils.data import Dataset, DataLoader, random_split, ConcatDataset
from torchvision.utils import make_grid
# pd.options.plotting.backend = "plotly"
pd.set_option("plotting.backend", "plotly")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from torch.utils.data import random_split
from torch.utils.data import Subset
print("Device: ", device)
# param_management
CSV_PATH = "../input/sample/sample_labels.csv"
IMG_DIR = "../input/sample/sample/sample/images/"
# # EDA
data = pd.read_csv(CSV_PATH)
data.head()
data["Patient Gender"].value_counts().plot.bar()
data["Patient Age"].apply(lambda x: int(x[1:3])).plot.hist()
data["Patient Age"].apply(lambda x: int(x[1:3])).plot.box()
data["View Position"].value_counts().plot.bar()
pathology_list = [
"Cardiomegaly",
"Emphysema",
"Effusion",
"Hernia",
"Nodule",
"Pneumothorax",
"Atelectasis",
"Pleural_Thickening",
"Mass",
"Edema",
"Consolidation",
"Infiltration",
"Fibrosis",
"Pneumonia",
]
for pathology in pathology_list:
data[pathology] = data["Finding Labels"].apply(lambda x: 1 if pathology in x else 0)
data["No Findings"] = data["Finding Labels"].apply(
lambda x: 1 if "No Finding" in x else 0
)
data = data.drop(list(data.iloc[:, 1:11].columns.values), axis=1)
data.iloc[:, 1:].sum().plot.barh()
data = data.drop(["No Findings"], axis=1)
data.iloc[:, 1:].sum().plot.barh()
data.iloc[:, 1:].mean().plot.barh()
# # Avoid Data Imbalance using Weighted Loss
def compute_class_freqs(labels):
labels = np.array(labels)
N = labels.shape[0]
positive_frequencies = np.sum(labels, axis=0) / N
negative_frequencies = 1 - positive_frequencies
return positive_frequencies, negative_frequencies
freq_pos, freq_neg = compute_class_freqs(data.iloc[:, 1:])
df = pd.DataFrame({"Class": pathology_list, "Label": "Positive", "Value": freq_pos})
df = df.append(
[
{"Class": pathology_list[l], "Label": "Negative", "Value": v}
for l, v in enumerate(freq_neg)
],
ignore_index=True,
)
plt.xticks(rotation=90)
f = sns.barplot(x="Class", y="Value", hue="Label", data=df)
pos_weights = freq_neg
neg_weights = freq_pos
pos_contribution = freq_pos * pos_weights
neg_contribution = freq_neg * neg_weights
df = pd.DataFrame(
{"Class": pathology_list, "Label": "Positive", "Value": pos_contribution}
)
df = df.append(
[
{"Class": pathology_list[l], "Label": "Negative", "Value": v}
for l, v in enumerate(neg_contribution)
],
ignore_index=True,
)
plt.xticks(rotation=90)
f = sns.barplot(x="Class", y="Value", hue="Label", data=df)
def weighted_loss(pos_weights, neg_weights, y_pred, y_true, epsilon=1e-7):
loss = 0.0
for i in range(len(pos_weights)):
loss_pos = -1 * torch.mean(
pos_weights[i] * y_true[:, i] * torch.log(y_pred[:, i] + epsilon)
)
loss_neg = -1 * torch.mean(
neg_weights[i]
* (1 - y_true[:, i])
* torch.log((1 - y_pred[:, i]) + epsilon)
)
loss += loss_pos + loss_neg
return loss
# # Loading Dataset and Applying Transforms
data_transform = T.Compose(
[
T.RandomRotation((-20, +20)),
T.Resize((224, 224)),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
class NIH_Dataset(Dataset):
def __init__(self, data, img_dir, transform=None):
self.data = data
self.img_dir = img_dir
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_file = self.img_dir + self.data.iloc[:, 0][idx]
img = Image.open(img_file).convert("RGB")
label = np.array(self.data.iloc[:, 1:].iloc[idx])
if self.transform:
img = self.transform(img)
return img, label
trainds = NIH_Dataset(data, img_dir=IMG_DIR, transform=data_transform)
labels = pathology_list
# subsets define
# subset1_indices = [i in index, label in enumerate(labels) if label == 'Cardiomegaly']
subset1_indices = data[data["Cardiomegaly"] == 1].index.tolist()
subset1 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset1_indices,
)
subset2_indices = data[data["Emphysema"] == 1].index.tolist()
subset2 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset2_indices,
)
subset3_indices = data[data["Effusion"] == 1].index.tolist()
subset3 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset3_indices,
)
subset4_indices = data[data["Hernia"] == 1].index.tolist()
subset4 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset4_indices,
)
subset5_indices = data[data["Nodule"] == 1].index.tolist()
subset5 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset5_indices,
)
subset6_indices = data[data["Pneumothorax"] == 1].index.tolist()
subset6 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset6_indices,
)
subset7_indices = data[data["Atelectasis"] == 1].index.tolist()
subset7 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset7_indices,
)
subset8_indices = data[data["Pleural_Thickening"] == 1].index.tolist()
subset8 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset8_indices,
)
subset9_indices = data[data["Mass"] == 1].index.tolist()
subset9 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset9_indices,
)
subset10_indices = data[data["Edema"] == 1].index.tolist()
subset10 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset10_indices,
)
subset11_indices = data[data["Consolidation"] == 1].index.tolist()
subset11 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset11_indices,
)
subset12_indices = data[data["Infiltration"] == 1].index.tolist()
subset12 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset12_indices,
)
subset13_indices = data[data["Fibrosis"] == 1].index.tolist()
subset13 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset13_indices,
)
subset14_indices = data[data["Pneumonia"] == 1].index.tolist()
subset14 = Subset(
NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform),
subset14_indices,
)
print(subset1.__len__())
print(subset2.__len__())
print(subset3.__len__())
print(subset4.__len__())
print(subset5.__len__())
print(subset6.__len__())
print(subset7.__len__())
print(subset8.__len__())
print(subset9.__len__())
print(subset10.__len__())
print(subset11.__len__())
print(subset12.__len__())
print(subset13.__len__())
print(subset14.__len__())
print(int(len(subset14) * 0.7))
print(int(len(subset14) * 0.3))
trainset1, valset1, testset1 = random_split(subset1, [99, 28, 14])
trainset2, valset2, testset2 = random_split(subset2, [89, 26, 12])
trainset3, valset3, testset3 = random_split(subset3, [451, 129, 64])
trainset4, valset4, testset4 = random_split(subset4, [10, 2, 1])
trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31])
trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27])
trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51])
trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18])
trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31])
trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27])
trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51])
trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18])
trainset9, valset9, testset9 = random_split(subset9, [200, 56, 28])
trainset10, valset10, testset10 = random_split(subset10, [83, 23, 12])
trainset11, valset11, testset11 = random_split(subset11, [159, 45, 22])
trainset12, valset12, testset12 = random_split(subset12, [677, 193, 97])
trainset13, valset13, testset13 = random_split(subset13, [59, 17, 8])
trainset14, valset14, testset14 = random_split(subset14, [43, 12, 7])
trainset = ConcatDataset(
[
trainset1,
trainset2,
trainset3,
trainset4,
trainset5,
trainset6,
trainset7,
trainset8,
trainset9,
trainset10,
trainset11,
trainset12,
trainset13,
trainset14,
]
)
validset = ConcatDataset(
[
valset1,
valset2,
valset3,
valset4,
valset5,
valset6,
valset7,
valset8,
valset9,
valset10,
valset11,
valset12,
valset13,
valset14,
]
)
testset = ConcatDataset(
[
testset1,
testset2,
testset3,
testset4,
testset5,
testset6,
testset7,
testset8,
testset9,
testset10,
testset11,
testset12,
testset13,
testset14,
]
)
def deprocess(img):
img = img.permute(1, 2, 0)
img = img * torch.Tensor([0.229, 0.224, 0.225]) + torch.Tensor(
[0.485, 0.456, 0.406]
)
return img
image, label = trainds[0]
class_labels = list(np.where(label == 1)[0])
plt.imshow(deprocess(image))
plt.title(itemgetter(*class_labels)(pathology_list))
# # Split Dataset and create dataloaders
trainset, validset, testset = random_split(trainds, [5000, 303, 303])
print("Length of trainset : {}".format(len(trainset)))
print("Length of testset : {}".format(len(testset)))
print("Length of validset : {}".format(len(validset)))
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
validloader = DataLoader(validset, batch_size=32, shuffle=False)
testloader = DataLoader(testset, batch_size=32, shuffle=False)
# # Define Pre-trained Model
from torchvision import models
model = models.resnet50()
model.load_state_dict(
torch.load("/kaggle/input/pretrained-model-weights-pytorch/resnet50-19c8e357.pth")
)
model = torch.load("/kaggle/working/Res50_epoch10.pt")
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Sequential(nn.Linear(2048, 14), nn.Sigmoid())
model.to(device)
# # Train Model
optimizer = optim.Adam(model.parameters(), lr=Learning_Rate)
schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
epochs = 5
valid_loss_min = np.Inf
#
epochs = 100
Learning_Rate = 0.01
optimizer = optim.Adam(model.parameters(), lr=Learning_Rate)
schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
for i in range(epochs):
train_loss = 0.0
valid_loss = 0.0
train_acc = 0.0
valid_acc = 0.0
model.train()
for images, labels in tqdm(trainloader):
images = images.to(device)
labels = labels.to(device)
ps = model(images)
loss = weighted_loss(pos_weights, neg_weights, ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
avg_train_loss = train_loss / len(trainloader)
model.eval()
with torch.no_grad():
for images, labels in tqdm(validloader):
images = images.to(device)
labels = labels.to(device)
ps = model(images)
loss = weighted_loss(pos_weights, neg_weights, ps, labels)
valid_loss += loss.item()
avg_valid_loss = valid_loss / len(validloader)
schedular.step(avg_valid_loss)
if avg_valid_loss <= valid_loss_min:
print(
"Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format(
valid_loss_min, avg_valid_loss
)
)
torch.save(model, "Res50_epoch10.pt")
valid_loss_min = avg_valid_loss
print("Epoch : {} Train Loss : {:.6f} ".format(i + 1, avg_train_loss))
print("Epoch : {} Valid Loss : {:.6f} ".format(i + 1, avg_valid_loss))
# # Each Class Accuracy
def class_accuracy(dataloader, model):
per_class_accuracy = [0 for i in range(len(pathology_list))]
total = 0.0
with torch.no_grad():
for images, labels in dataloader:
ps = model(images.to(device))
labels = labels.to(device)
ps = (ps >= 0.5).float()
for i in range(ps.shape[1]):
x1 = ps[:, i : i + 1]
x2 = labels[:, i : i + 1]
per_class_accuracy[i] += int((x1 == x2).sum())
per_class_accuracy = [
(i / len(dataloader.dataset)) * 100.0 for i in per_class_accuracy
]
return per_class_accuracy
def get_acc_data(class_names, acc_list):
df = pd.DataFrame(list(zip(class_names, acc_list)), columns=["Labels", "Acc"])
return df
print("Train Dataset Accuracy Report")
acc_list = class_accuracy(trainloader, model)
get_acc_data(pathology_list, acc_list)
print("Test Dataset Accuracy Report")
acc_list = class_accuracy(testloader, model)
get_acc_data(pathology_list, acc_list)
print("Valid Dataset Accuracy Report")
acc_list = class_accuracy(validloader, model)
get_acc_data(pathology_list, acc_list)
from sklearn.metrics import roc_auc_score, roc_curve
def get_roc_curve(labels, preds, class_names):
plt.figure(figsize=(15, 10))
plt.title("Receiver Operating Characteristic")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
for i in range(len(class_names)):
fpr, tpr, thresholds = roc_curve(labels[:, i], preds[:, i])
plt.plot(fpr, tpr, label=class_names[i])
plt.legend(loc="best")
plt.show()
def get_roc_auc_score(labels, preds):
roc_auc_scores = []
for i in range(len(pathology_list)):
roc_auc_scores.append(roc_auc_score(labels[:, i], preds[:, i]))
return roc_auc_scores
def get_roc_auc_data(class_names, roc_auc_scores):
df = pd.DataFrame(
list(zip(class_names, roc_auc_scores)), columns=["Labels", "ROC AUC Score"]
)
return df
def get_roc_data(labels, preds, class_names):
get_roc_curve(labels, preds, class_names)
roc_auc_scores = get_roc_auc_score(labels, preds)
return get_roc_auc_data(class_names, roc_auc_scores)
def get_roc_data_for_dataset(dataloader, model, class_names):
labels = []
preds = []
with torch.no_grad():
for images, labels_batch in dataloader:
labels_batch = labels_batch.numpy()
labels.append(labels_batch)
ps = model(images.to(device))
ps = ps.cpu().numpy()
preds.append(ps)
labels = np.concatenate(labels)
preds = np.concatenate(preds)
return get_roc_data(labels, preds, class_names)
print("Train Dataset ROC AUC Report")
get_roc_data_for_dataset(trainloader, model, pathology_list)
print("Test Dataset ROC AUC Report")
get_roc_data_for_dataset(testloader, model, pathology_list)
print("Valid Dataset ROC AUC Report")
get_roc_data_for_dataset(validloader, model, pathology_list)
# GradCAM
def get_gradcam(img, model, class_names, layer_name):
gradcam = GradCAM.from_config(
model_type="resnet", arch=model, layer_name=layer_name
)
img = np.float32(img) / 255
input = preprocess_image(img)
input = input.unsqueeze(0)
model.to(device)
model.eval()
input = input.to(device)
target_index = None
mask = gradcam(input, target_index)
heatmap, result = visualize_cam(mask, img)
return heatmap, result
def plot_heatmap(img, heatmap, result, class_names):
plt.figure(figsize=(10, 10))
ax = sns.heatmap(heatmap, cmap="jet")
ax2 = plt.axes([0, 0, 1, 1], frameon=False)
plt.axis("off")
plt.imshow(img)
plt.savefig("heatmap.png")
plt.show()
plt.figure(figsize=(10, 10))
plt.imshow(result)
plt.savefig("result.png")
plt.show()
def get_gradcam_for_dataset(dataloader, model, class_names, layer_name):
images, labels = next(iter(dataloader))
images = images.numpy()
labels = labels.numpy()
idx = np.random.randint(0, len(images))
img = images[idx]
img = np.transpose(img, (1, 2, 0))
heatmap, result = get_gradcam(img, model, class_names, layer_name)
plot_heatmap(img, heatmap, result, class_names)
get_gradcam_for_dataset(trainloader, model, pathology_list, "layer4")
get_gradcam_for_dataset(testloader, model, pathology_list, "layer4")
get_gradcam_for_dataset(validloader, model, pathology_list, "layer4")
# # Plot Results
def view_classify(img, ps, label):
class_name = pathology_list
classes = np.array(class_name)
ps = ps.cpu().data.numpy().squeeze()
img = deprocess(img)
class_labels = list(np.where(label == 1)[0])
if not class_labels:
title = "No Findings"
else:
title = itemgetter(*class_labels)(class_name)
fig, (ax1, ax2) = plt.subplots(figsize=(8, 12), ncols=2)
ax1.imshow(img)
ax1.set_title("Ground Truth : {}".format(title))
ax1.axis("off")
ax2.barh(classes, ps)
ax2.set_aspect(0.1)
ax2.set_yticks(classes)
ax2.set_yticklabels(classes)
ax2.set_title("Predicted Class")
ax2.set_xlim(0, 1.1)
plt.tight_layout()
return None
image, label = testset[33]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
image, label = trainset[999]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
image, label = validset[234]
ps = model(image.unsqueeze(0).to(device))
view_classify(image, ps, label)
| false | 0 | 6,153 | 0 | 7,407 | 6,153 |
||
129423947
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
# 1. Load and inspect the data
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
# 2. Preprocess the data
# Identify numerical and categorical columns
num_cols = train.drop(["Id", "Class", "EJ"], axis=1).columns
cat_cols = ["EJ"]
# Build preprocessing pipeline
preprocessor = ColumnTransformer(
transformers=[
(
"num",
Pipeline(
steps=[
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler()),
]
),
num_cols,
),
(
"cat",
Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder()),
]
),
cat_cols,
),
]
)
# 3. Build and train the model
X = train.drop(["Id", "Class"], axis=1)
y = train["Class"]
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.2, random_state=42
)
rf = Pipeline(
steps=[
("preprocessor", preprocessor),
("classifier", RandomForestClassifier(n_estimators=100, random_state=42)),
]
)
rf.fit(X_train, y_train)
# 4. Evaluate the model
y_train_pred = rf.predict(X_train)
y_valid_pred = rf.predict(X_valid)
print("Training accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_valid, y_valid_pred))
# Make predictions on the test set
X_test = test.drop("Id", axis=1)
predictions = rf.predict_proba(X_test)
# Check if the number of predictions matches the number of rows in the test set
assert len(predictions) == len(
test
), "Number of predictions must match number of rows in test set"
# Create a DataFrame for the submission
submission = pd.DataFrame(predictions, columns=["class_0", "class_1"])
submission.insert(0, "Id", test["Id"])
# Save the predictions to a CSV file
submission = test[["Id"]].copy()
submission["Class"] = predictions
submission.to_csv("submission.csv", index=False)
# Save the submission DataFrame to a CSV file
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423947.ipynb
| null | null |
[{"Id": 129423947, "ScriptId": 38481998, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11408729, "CreationDate": "05/13/2023 17:05:35", "VersionNumber": 2.0, "Title": "Age Related Conditions", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 55.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
| null | null | null | null |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
# 1. Load and inspect the data
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
# 2. Preprocess the data
# Identify numerical and categorical columns
num_cols = train.drop(["Id", "Class", "EJ"], axis=1).columns
cat_cols = ["EJ"]
# Build preprocessing pipeline
preprocessor = ColumnTransformer(
transformers=[
(
"num",
Pipeline(
steps=[
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler()),
]
),
num_cols,
),
(
"cat",
Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder()),
]
),
cat_cols,
),
]
)
# 3. Build and train the model
X = train.drop(["Id", "Class"], axis=1)
y = train["Class"]
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.2, random_state=42
)
rf = Pipeline(
steps=[
("preprocessor", preprocessor),
("classifier", RandomForestClassifier(n_estimators=100, random_state=42)),
]
)
rf.fit(X_train, y_train)
# 4. Evaluate the model
y_train_pred = rf.predict(X_train)
y_valid_pred = rf.predict(X_valid)
print("Training accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_valid, y_valid_pred))
# Make predictions on the test set
X_test = test.drop("Id", axis=1)
predictions = rf.predict_proba(X_test)
# Check if the number of predictions matches the number of rows in the test set
assert len(predictions) == len(
test
), "Number of predictions must match number of rows in test set"
# Create a DataFrame for the submission
submission = pd.DataFrame(predictions, columns=["class_0", "class_1"])
submission.insert(0, "Id", test["Id"])
# Save the predictions to a CSV file
submission = test[["Id"]].copy()
submission["Class"] = predictions
submission.to_csv("submission.csv", index=False)
# Save the submission DataFrame to a CSV file
submission.to_csv("submission.csv", index=False)
| false | 0 | 734 | 4 | 734 | 734 |
||
129423402
|
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing
import sys
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.metrics import mean_squared_error, mean_absolute_error
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/fixed-data-new/train_fixed_new.csv")
test = pd.read_csv("/kaggle/input/fixed-data-new/test_fixed_new.csv")
# # 1. Data preparation
# Tu ću sad pronaći mašinu koja ima seasonality koristeći plot_acf.
# AI10158
# AI10635
# AL11466
# AL12144
# CI12166
# DL101579
# DS100760
#
for machine in train.machine_name.unique():
machine_data = train[train["machine_name"] == machine]["total"]
# Perform seasonal decomposition
decomposition = seasonal_decompose(
machine_data, model="additive", period=int(np.floor(len(machine_data) / 2))
)
# Access the seasonal component
seasonal_component = decomposition.seasonal
# Check if there is seasonality
if abs(seasonal_component).mean() > 10000: # Adjust the threshold as needed
print("Seasonality detected-" + machine)
def getAnomalyLine(centil):
eff_1 = []
anomalije = train.loc[(train["label"] == 1)]
for index, row in anomalije.iterrows():
eff_1.append(row["broken"] / row["total"])
num_to_drop = int(len(eff_1) * centil)
eff_1.sort()
eff_1 = eff_1[num_to_drop:]
return eff_1[0]
def visualizeEff(machine_name, centil):
machine_data = train.loc[
(train["machine_name"] == machine_name) & (train["day"] > 364)
]
plt.figure(figsize=(20, 8))
anomalyLine = getAnomalyLine(centil)
eff = []
dani_anomalija = []
anomalija_eff = []
line = []
dani = []
for index, row in machine_data.iterrows():
eff.append(row["broken"] / row["total"])
line.append(anomalyLine)
dani.append(row["day"])
if row["label"] == 1:
dani_anomalija.append(row["day"])
anomalija_eff.append(row["broken"] / row["total"])
plt.title(machine_name)
plt.plot(dani, eff, "g-", label="Linija efektivnosti stroja po danima")
plt.scatter(
dani_anomalija,
anomalija_eff,
c="r",
edgecolors="black",
s=75,
label="Anomalije",
)
plt.plot(dani, line, "k--", label="Linija efikasnosti za dan centil")
plt.legend(loc="best")
plt.show()
return
def visualizeData(machine_name):
machine_data = train.loc[
(train["machine_name"] == machine_name) & (train["day"] > 364)
]
plt.figure(figsize=(20, 8))
total = []
broken = []
anomalija_day = []
anomalija_total = []
anomalija_broken = []
for index, row in machine_data.iterrows():
total.append(row["total"])
broken.append(row["broken"])
if row["label"] == 1:
anomalija_total.append(row["total"])
anomalija_broken.append(row["broken"])
anomalija_day.append(row["day"])
plt.title(machine_name)
plt.scatter(
range(365, 365 + len(total)),
np.log(total),
c="cyan",
edgecolors="black",
label="Total",
)
plt.scatter(
range(365, 365 + len(broken)),
np.log(broken),
c="yellow",
edgecolors="black",
label="Broken",
)
# plt.scatter(range(365,365+len(total)), total, c='cyan', edgecolors= "black", label='Total')
# plt.scatter(range(365,365+len(broken)), np.log(broken), c='yellow',edgecolors= "black", label='Broken')
plt.scatter(
anomalija_day,
np.log(anomalija_total),
c="b",
s=100,
edgecolors="black",
label="Total kod anomalije",
)
plt.scatter(
anomalija_day,
np.log(anomalija_broken),
c="r",
s=100,
edgecolors="black",
label="Broken kod anomalije",
)
plt.legend(loc="best")
plt.show()
return
visualizeData("CI101712")
visualizeEff("CI101712", 0.5)
# # 2. Exponential smoothing
machine_data = train[train["machine_name"] == "CI101712"]["total"]
machine_data = machine_data.sample(frac=1, random_state=42)
train_size = int(len(machine_data) * 0.7)
train_data = machine_data[:train_size]
test_data = machine_data[train_size:]
train_values = train_data.values
test_values = test_data.values
model = ExponentialSmoothing(train_values).fit()
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
plt.figure(figsize=(20, 8))
plt.plot(range(1, len(train_values) + 1), train_values, label="train")
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
test_values,
label="test",
)
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
pred,
label="pred",
)
plt.title("Exponential Smoothing")
plt.legend()
plt.show()
# seasonal_periods=23 jer je 162 dana a gore se vid 7 perioda
best_level = 0
best_slope = 0
best_seasonal = 0
best_error = sys.maxsize
for smoothing_level in [0.1, 0.2, 0.4]:
for smoothing_slope in [0.1, 0.2, 0.4]:
for smoothing_seasonal in [0.1, 0.2, 0.4]:
model = ExponentialSmoothing(
train_values, seasonal="add", seasonal_periods=23
).fit(
smoothing_level=smoothing_level,
smoothing_slope=smoothing_slope,
smoothing_seasonal=smoothing_seasonal,
)
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
error = mean_absolute_error(test_values, pred)
print("Parametri:")
print("Soothing_level: " + str(smoothing_level), end="")
print(", Soothing_slope: " + str(smoothing_slope), end="")
print(", Soothing_seasonal: " + str(smoothing_seasonal))
print("Error: " + str(error))
print()
if error < best_error:
best_level = smoothing_level
best_slope = smoothing_slope
best_seasonal = smoothing_seasonal
best_error = error
print()
print("Najbolji parametri:")
print("Soothing_level: " + str(best_level))
print("Soothing_slope: " + str(best_slope))
print("Soothing_seasonal: " + str(best_seasonal))
model = ExponentialSmoothing(train_values, seasonal="add", seasonal_periods=23).fit(
smoothing_level=best_level,
smoothing_slope=best_slope,
smoothing_seasonal=best_seasonal,
)
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
plt.figure(figsize=(20, 8))
plt.plot(range(1, len(train_values) + 1), train_values, label="train")
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
test_values,
label="test",
)
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
pred,
label="pred",
)
plt.title(
"s_level="
+ str(best_level)
+ "s_slope="
+ str(best_slope)
+ "s_seasonal="
+ str(best_seasonal)
)
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423402.ipynb
| null | null |
[{"Id": 129423402, "ScriptId": 38468064, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9230157, "CreationDate": "05/13/2023 16:59:49", "VersionNumber": 1.0, "Title": "[MN <0036524183>] Time-series (TS)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 182.0, "LinesInsertedFromPrevious": 182.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing
import sys
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.metrics import mean_squared_error, mean_absolute_error
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/fixed-data-new/train_fixed_new.csv")
test = pd.read_csv("/kaggle/input/fixed-data-new/test_fixed_new.csv")
# # 1. Data preparation
# Tu ću sad pronaći mašinu koja ima seasonality koristeći plot_acf.
# AI10158
# AI10635
# AL11466
# AL12144
# CI12166
# DL101579
# DS100760
#
for machine in train.machine_name.unique():
machine_data = train[train["machine_name"] == machine]["total"]
# Perform seasonal decomposition
decomposition = seasonal_decompose(
machine_data, model="additive", period=int(np.floor(len(machine_data) / 2))
)
# Access the seasonal component
seasonal_component = decomposition.seasonal
# Check if there is seasonality
if abs(seasonal_component).mean() > 10000: # Adjust the threshold as needed
print("Seasonality detected-" + machine)
def getAnomalyLine(centil):
eff_1 = []
anomalije = train.loc[(train["label"] == 1)]
for index, row in anomalije.iterrows():
eff_1.append(row["broken"] / row["total"])
num_to_drop = int(len(eff_1) * centil)
eff_1.sort()
eff_1 = eff_1[num_to_drop:]
return eff_1[0]
def visualizeEff(machine_name, centil):
machine_data = train.loc[
(train["machine_name"] == machine_name) & (train["day"] > 364)
]
plt.figure(figsize=(20, 8))
anomalyLine = getAnomalyLine(centil)
eff = []
dani_anomalija = []
anomalija_eff = []
line = []
dani = []
for index, row in machine_data.iterrows():
eff.append(row["broken"] / row["total"])
line.append(anomalyLine)
dani.append(row["day"])
if row["label"] == 1:
dani_anomalija.append(row["day"])
anomalija_eff.append(row["broken"] / row["total"])
plt.title(machine_name)
plt.plot(dani, eff, "g-", label="Linija efektivnosti stroja po danima")
plt.scatter(
dani_anomalija,
anomalija_eff,
c="r",
edgecolors="black",
s=75,
label="Anomalije",
)
plt.plot(dani, line, "k--", label="Linija efikasnosti za dan centil")
plt.legend(loc="best")
plt.show()
return
def visualizeData(machine_name):
machine_data = train.loc[
(train["machine_name"] == machine_name) & (train["day"] > 364)
]
plt.figure(figsize=(20, 8))
total = []
broken = []
anomalija_day = []
anomalija_total = []
anomalija_broken = []
for index, row in machine_data.iterrows():
total.append(row["total"])
broken.append(row["broken"])
if row["label"] == 1:
anomalija_total.append(row["total"])
anomalija_broken.append(row["broken"])
anomalija_day.append(row["day"])
plt.title(machine_name)
plt.scatter(
range(365, 365 + len(total)),
np.log(total),
c="cyan",
edgecolors="black",
label="Total",
)
plt.scatter(
range(365, 365 + len(broken)),
np.log(broken),
c="yellow",
edgecolors="black",
label="Broken",
)
# plt.scatter(range(365,365+len(total)), total, c='cyan', edgecolors= "black", label='Total')
# plt.scatter(range(365,365+len(broken)), np.log(broken), c='yellow',edgecolors= "black", label='Broken')
plt.scatter(
anomalija_day,
np.log(anomalija_total),
c="b",
s=100,
edgecolors="black",
label="Total kod anomalije",
)
plt.scatter(
anomalija_day,
np.log(anomalija_broken),
c="r",
s=100,
edgecolors="black",
label="Broken kod anomalije",
)
plt.legend(loc="best")
plt.show()
return
visualizeData("CI101712")
visualizeEff("CI101712", 0.5)
# # 2. Exponential smoothing
machine_data = train[train["machine_name"] == "CI101712"]["total"]
machine_data = machine_data.sample(frac=1, random_state=42)
train_size = int(len(machine_data) * 0.7)
train_data = machine_data[:train_size]
test_data = machine_data[train_size:]
train_values = train_data.values
test_values = test_data.values
model = ExponentialSmoothing(train_values).fit()
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
plt.figure(figsize=(20, 8))
plt.plot(range(1, len(train_values) + 1), train_values, label="train")
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
test_values,
label="test",
)
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
pred,
label="pred",
)
plt.title("Exponential Smoothing")
plt.legend()
plt.show()
# seasonal_periods=23 jer je 162 dana a gore se vid 7 perioda
best_level = 0
best_slope = 0
best_seasonal = 0
best_error = sys.maxsize
for smoothing_level in [0.1, 0.2, 0.4]:
for smoothing_slope in [0.1, 0.2, 0.4]:
for smoothing_seasonal in [0.1, 0.2, 0.4]:
model = ExponentialSmoothing(
train_values, seasonal="add", seasonal_periods=23
).fit(
smoothing_level=smoothing_level,
smoothing_slope=smoothing_slope,
smoothing_seasonal=smoothing_seasonal,
)
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
error = mean_absolute_error(test_values, pred)
print("Parametri:")
print("Soothing_level: " + str(smoothing_level), end="")
print(", Soothing_slope: " + str(smoothing_slope), end="")
print(", Soothing_seasonal: " + str(smoothing_seasonal))
print("Error: " + str(error))
print()
if error < best_error:
best_level = smoothing_level
best_slope = smoothing_slope
best_seasonal = smoothing_seasonal
best_error = error
print()
print("Najbolji parametri:")
print("Soothing_level: " + str(best_level))
print("Soothing_slope: " + str(best_slope))
print("Soothing_seasonal: " + str(best_seasonal))
model = ExponentialSmoothing(train_values, seasonal="add", seasonal_periods=23).fit(
smoothing_level=best_level,
smoothing_slope=best_slope,
smoothing_seasonal=best_seasonal,
)
pred = model.predict(
start=len(train_values), end=len(train_values) + len(test_values) - 1
)
plt.figure(figsize=(20, 8))
plt.plot(range(1, len(train_values) + 1), train_values, label="train")
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
test_values,
label="test",
)
plt.plot(
range(len(train_values) + 1, len(train_values) + len(test_values) + 1),
pred,
label="pred",
)
plt.title(
"s_level="
+ str(best_level)
+ "s_slope="
+ str(best_slope)
+ "s_seasonal="
+ str(best_seasonal)
)
plt.legend()
plt.show()
| false | 0 | 2,367 | 0 | 2,367 | 2,367 |
||
129423015
|
# ## Hello everyone! 😺
# **Today we will build and train our own GAN model using the Tensorflow framework and its Keras wrapper. Watch carefully it will be entertaining!**
# # Libraries
import os
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow import keras
# # Prepare data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[y_train == 8]
y_train = y_train[y_train == 8]
BUFFER_SIZE = x_train.shape[0]
BATCH_SIZE = 100
BUFFER_SIZE = BUFFER_SIZE // BATCH_SIZE * BATCH_SIZE
x_train = x_train[:BUFFER_SIZE]
y_train = y_train[:BUFFER_SIZE]
print(x_train.shape, y_train.shape)
x_train = x_train / 255
y_train = y_train / 255
dataset = (
tf.data.Dataset.from_tensor_slices(x_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
)
# # Our model | GAN
# 
class Generator(tf.keras.Model):
def __init__(
self, noise_size: int = 2, width: int = 256, height: int = 7, depth: int = 2
):
super().__init__()
self.noise_input = keras.Input(shape=(noise_size,))
self.stem = keras.Sequential(
[
layers.Dense(
units=height * height * width,
input_shape=(noise_size,),
activation="relu",
use_bias=False,
),
layers.BatchNormalization(),
layers.Reshape((height, height, width)),
],
name="stem-network",
)
self.bottleneck = keras.Sequential(
[
layers.Conv2DTranspose(
filters=width // 2,
kernel_size=4,
strides=1,
padding="same",
use_bias=False,
),
layers.BatchNormalization(),
layers.ReLU(),
],
name="bottleneck",
)
self.body = keras.Sequential(
[*[self.deconv_block(width, i) for i in range(2, depth * 2 + 1, 2)]],
name="body",
)
self.head = keras.Sequential(
[
layers.Dropout(0.30),
layers.Conv2DTranspose(
filters=1,
kernel_size=4,
strides=1,
padding="same",
activation="sigmoid",
use_bias=False,
),
],
name="head",
)
@staticmethod
def deconv_block(width: int, i: int, name: str = None) -> keras.Sequential:
if not name:
name = f"deconv-block-{i//2}"
return keras.Sequential(
[
layers.Conv2DTranspose(
filters=width // i,
kernel_size=4,
strides=2,
padding="same",
use_bias=False,
),
layers.BatchNormalization(),
layers.ReLU(),
],
name=name,
)
def call(self, inputs, training=False):
x = self.stem(inputs)
x = self.bottleneck(x)
x = self.body(x)
x = self.head(x)
return x
gan = Generator()
gan.build((None, 2))
gan.summary()
class Discriminator:
def __init__():
pass
def call():
pass
class GAN:
def __init__():
pass
def call():
pass
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423015.ipynb
| null | null |
[{"Id": 129423015, "ScriptId": 38382573, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13000811, "CreationDate": "05/13/2023 16:55:32", "VersionNumber": 3.0, "Title": "GAN with Tensorflow&Keras \ud83c\udf4e", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 51.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## Hello everyone! 😺
# **Today we will build and train our own GAN model using the Tensorflow framework and its Keras wrapper. Watch carefully it will be entertaining!**
# # Libraries
import os
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow import keras
# # Prepare data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[y_train == 8]
y_train = y_train[y_train == 8]
BUFFER_SIZE = x_train.shape[0]
BATCH_SIZE = 100
BUFFER_SIZE = BUFFER_SIZE // BATCH_SIZE * BATCH_SIZE
x_train = x_train[:BUFFER_SIZE]
y_train = y_train[:BUFFER_SIZE]
print(x_train.shape, y_train.shape)
x_train = x_train / 255
y_train = y_train / 255
dataset = (
tf.data.Dataset.from_tensor_slices(x_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
)
# # Our model | GAN
# 
class Generator(tf.keras.Model):
def __init__(
self, noise_size: int = 2, width: int = 256, height: int = 7, depth: int = 2
):
super().__init__()
self.noise_input = keras.Input(shape=(noise_size,))
self.stem = keras.Sequential(
[
layers.Dense(
units=height * height * width,
input_shape=(noise_size,),
activation="relu",
use_bias=False,
),
layers.BatchNormalization(),
layers.Reshape((height, height, width)),
],
name="stem-network",
)
self.bottleneck = keras.Sequential(
[
layers.Conv2DTranspose(
filters=width // 2,
kernel_size=4,
strides=1,
padding="same",
use_bias=False,
),
layers.BatchNormalization(),
layers.ReLU(),
],
name="bottleneck",
)
self.body = keras.Sequential(
[*[self.deconv_block(width, i) for i in range(2, depth * 2 + 1, 2)]],
name="body",
)
self.head = keras.Sequential(
[
layers.Dropout(0.30),
layers.Conv2DTranspose(
filters=1,
kernel_size=4,
strides=1,
padding="same",
activation="sigmoid",
use_bias=False,
),
],
name="head",
)
@staticmethod
def deconv_block(width: int, i: int, name: str = None) -> keras.Sequential:
if not name:
name = f"deconv-block-{i//2}"
return keras.Sequential(
[
layers.Conv2DTranspose(
filters=width // i,
kernel_size=4,
strides=2,
padding="same",
use_bias=False,
),
layers.BatchNormalization(),
layers.ReLU(),
],
name=name,
)
def call(self, inputs, training=False):
x = self.stem(inputs)
x = self.bottleneck(x)
x = self.body(x)
x = self.head(x)
return x
gan = Generator()
gan.build((None, 2))
gan.summary()
class Discriminator:
def __init__():
pass
def call():
pass
class GAN:
def __init__():
pass
def call():
pass
| false | 0 | 934 | 0 | 934 | 934 |
||
129879523
|
<jupyter_start><jupyter_text>google_share_price_data
Kaggle dataset identifier: google-share-price-data
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/google-share-price-data/GOOGL.csv")
import statsmodels
df.head()
google_df = df.loc[:, ["Date", "Close"]]
google_df
from statsmodels.tsa.stattools import adfuller
ADF_result = adfuller(google_df["Close"])
ADF_result
print("ADF Stats", ADF_result[0])
print("P-value", ADF_result[1])
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(google_df["Close"], lags=20)
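# A small helper of my own (not part of the original notebook) to make the ADF decision
# explicit: the unit-root null is rejected, and the series treated as stationary, only
# when the p-value falls below the chosen significance level.
def is_stationary(series, alpha=0.05):
    # adfuller returns (test statistic, p-value, used lags, n obs, critical values, ...)
    p_value = adfuller(series)[1]
    return p_value < alpha


print("Raw close prices stationary?", is_stationary(google_df["Close"]))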
# ### First-order differencing
diff_google_df = np.diff(google_df["Close"], n=1)
diff_google_ADF_result = adfuller(diff_google_df)
diff_google_ADF_result
print("ADF Stats", diff_google_ADF_result[0])
print("P-value", diff_google_ADF_result[1])
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(diff_google_df, lags=20)
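# Optional visual check (my own addition, assuming matplotlib is available in the
# environment): after one round of differencing the closes should fluctuate around a
# roughly constant level, which is consistent with the ADF result above.
import matplotlib.pyplot as plt

plt.plot(diff_google_df)
plt.title("First-order differenced Google close prices")
plt.xlabel("Observation")
plt.ylabel("Daily change in close")
plt.show()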
# # Naive Algos
len(google_df)
train = google_df.iloc[:202]
test = google_df.iloc[202:]
len(train) + len(test)
def last_record(train, test):
    # Naive baseline: forecast every test day with the last observed training close
    test_df = test.copy()
    test_df["Close"] = train.tail(1)["Close"].unique()[0]
    return test_df
last_record_df = last_record(train, test)
def arithmetic_mean(train, test):
    # Naive baseline: forecast every test day with the mean of all training closes
    train_mean = train["Close"].mean()
    test_df = test.copy()
    test_df["Close"] = train_mean
    return test_df
arithmetic_mean_df = arithmetic_mean(train, test)
def last_month(train, test):
    # Naive baseline: forecast every test day with the mean close of the last 30 training days
    # (the label-based slice below relies on `train` keeping its default RangeIndex)
    train_mean = train.loc[len(train) - 30 :, ["Close"]].mean().values[0]
    test_df = test.copy()
    test_df["Close"] = train_mean
    return test_df
last_month_df = last_month(train, test)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error
import math
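# Added sketch (illustrative helpers, not used by the notebook): the metrics applied
# below, written out explicitly —
#   MAPE = mean(|y_true - y_pred| / |y_true|),  RMSE = sqrt(mean((y_true - y_pred) ** 2))
def manual_mape(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true, dtype=float), np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / y_true))
def manual_rmse(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true, dtype=float), np.asarray(y_pred, dtype=float)
    return np.sqrt(np.mean((y_true - y_pred) ** 2))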
last_month_mape = mean_absolute_percentage_error(test["Close"], last_month_df["Close"])
last_month_mse = mean_squared_error(test["Close"], last_month_df["Close"])
last_month_rmse = math.sqrt(mean_squared_error(test["Close"], last_month_df["Close"]))
last_record_mape = mean_absolute_percentage_error(
test["Close"], last_record_df["Close"]
)
last_record_mse = mean_squared_error(test["Close"], last_record_df["Close"])
last_record_rmse = math.sqrt(mean_squared_error(test["Close"], last_record_df["Close"]))
arithmetic_mean_mape = mean_absolute_percentage_error(
test["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_mse = mean_squared_error(test["Close"], arithmetic_mean_df["Close"])
arithmetic_mean_rmse = math.sqrt(
mean_squared_error(test["Close"], arithmetic_mean_df["Close"])
)
mape = {
"last_month": last_month_mape,
"last_record": last_record_mape,
"arithmetic_mean": arithmetic_mean_mape,
}
mse = {
"last_month": last_month_mse,
"last_record": last_record_mse,
"arithmetic_mean": arithmetic_mean_mse,
}
rmse = {
"last_month": last_month_rmse,
"last_record": last_record_rmse,
"arithmetic_mean": arithmetic_mean_rmse,
}
print("MAPE:-", mape, "\n")
print("MSE:-", mse, "\n")
print("RMSE:-", rmse, "\n")
def plot_bar_graph(plot_dict, ylabel):
import matplotlib.pyplot as plt
import numpy as np
# Data for the bar graph
x = plot_dict.keys()
y = plot_dict.values()
# Create the bar graph
plt.bar(x, y)
# Set the labels and title
plt.xlabel("BaseLine")
plt.ylabel(ylabel.upper())
plt.title("Bar Graph")
# Show the plot
plt.show()
plot_bar_graph(mape, "mape")
plot_bar_graph(mse, "mse")
plot_bar_graph(rmse, "rmse")
def shif_by_one_day(train, test):
test_df = test.copy()
test_df["Close"] = test_df["Close"].shift(1)
test_df.loc[test_df["Close"].index[0], "Close"] = train.loc[
train["Close"].index[-1], "Close"
]
return test_df
shif_by_one_day_df = shif_by_one_day(train, test)
last_month_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], last_month_df["Close"]
)
last_month_mse = mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"])
last_month_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"])
)
last_record_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], last_record_df["Close"]
)
last_record_mse = mean_squared_error(
shif_by_one_day_df["Close"], last_record_df["Close"]
)
last_record_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], last_record_df["Close"])
)
arithmetic_mean_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_mse = mean_squared_error(
shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], arithmetic_mean_df["Close"])
)
mape = {
"last_month": last_month_mape,
"last_record": last_record_mape,
"arithmetic_mean": arithmetic_mean_mape,
}
mse = {
"last_month": last_month_mse,
"last_record": last_record_mse,
"arithmetic_mean": arithmetic_mean_mse,
}
rmse = {
"last_month": last_month_rmse,
"last_record": last_record_rmse,
"arithmetic_mean": arithmetic_mean_rmse,
}
print("MAPE:-", mape, "\n")
print("MSE:-", mse, "\n")
print("RMSE:-", rmse, "\n")
def plot_bar_graph(plot_dict, ylabel):
import matplotlib.pyplot as plt
import numpy as np
# Data for the bar graph
x = plot_dict.keys()
y = plot_dict.values()
# Create the bar graph
plt.bar(x, y)
# Set the labels and title
plt.xlabel("BaseLine")
plt.ylabel(ylabel.upper())
plt.title("Bar Graph")
# Show the plot
plt.show()
plot_bar_graph(mape, "mape")
plot_bar_graph(mse, "mse")
plot_bar_graph(rmse, "rmse")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/879/129879523.ipynb
|
google-share-price-data
|
zidanesunesara
|
[{"Id": 129879523, "ScriptId": 38591595, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4829488, "CreationDate": "05/17/2023 06:40:15", "VersionNumber": 2.0, "Title": "randoom_walk", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 185.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186284491, "KernelVersionId": 129879523, "SourceDatasetVersionId": 5697376}]
|
[{"Id": 5697376, "DatasetId": 3275919, "DatasourceVersionId": 5773019, "CreatorUserId": 4829488, "LicenseName": "Unknown", "CreationDate": "05/16/2023 09:47:05", "VersionNumber": 1.0, "Title": "google_share_price_data", "Slug": "google-share-price-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3275919, "CreatorUserId": 4829488, "OwnerUserId": 4829488.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5697376.0, "CurrentDatasourceVersionId": 5773019.0, "ForumId": 3341596, "Type": 2, "CreationDate": "05/16/2023 09:47:05", "LastActivityDate": "05/16/2023", "TotalViews": 76, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 3}]
|
[{"Id": 4829488, "UserName": "zidanesunesara", "DisplayName": "Zidane Sunesara", "RegisterDate": "04/07/2020", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/google-share-price-data/GOOGL.csv")
import statsmodels
df.head()
google_df = df.loc[:, ["Date", "Close"]]
google_df
from statsmodels.tsa.stattools import adfuller
ADF_result = adfuller(google_df["Close"])
ADF_result
ADF_result
print("ADF Stats", ADF_result[0])
print("P-value", ADF_result[1])
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(google_df["Close"], lags=20)
# ### Doing first order difference
diff_google_df = np.diff(google_df["Close"], n=1)
diff_google_ADF_result = adfuller(diff_google_df)
diff_google_ADF_result
print("ADF Stats", diff_google_ADF_result[0])
print("P-value", diff_google_ADF_result[1])
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(diff_google_df, lags=20)
# # Naive Algos
len(google_df)
train = google_df.iloc[:202]
test = google_df.iloc[202:]
len(train) + len(test)
def last_record(train, test):
test_df = test.copy()
test_df["Close"] = train.tail(1)["Close"].unique()[0]
return test_df
last_record_df = last_record(train, test)
def arithmetic_mean(train, test):
train_mean = train["Close"].mean()
test_df = test.copy()
test_df["Close"] = train_mean
return test_df
arithmetic_mean_df = arithmetic_mean(train, test)
def last_month(train, test):
train_mean = train.loc[len(train) - 30 :, ["Close"]].mean().values[0]
test_df = test.copy()
test_df["Close"] = train_mean
return test_df
last_month_df = last_month(train, test)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error
import math
last_month_mape = mean_absolute_percentage_error(test["Close"], last_month_df["Close"])
last_month_mse = mean_squared_error(test["Close"], last_month_df["Close"])
last_month_rmse = math.sqrt(mean_squared_error(test["Close"], last_month_df["Close"]))
last_record_mape = mean_absolute_percentage_error(
test["Close"], last_record_df["Close"]
)
last_record_mse = mean_squared_error(test["Close"], last_record_df["Close"])
last_record_rmse = math.sqrt(mean_squared_error(test["Close"], last_record_df["Close"]))
arithmetic_mean_mape = mean_absolute_percentage_error(
test["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_mse = mean_squared_error(test["Close"], arithmetic_mean_df["Close"])
arithmetic_mean_rmse = math.sqrt(
mean_squared_error(test["Close"], arithmetic_mean_df["Close"])
)
mape = {
"last_month": last_month_mape,
"last_record": last_record_mape,
"arithmetic_mean": arithmetic_mean_mape,
}
mse = {
"last_month": last_month_mse,
"last_record": last_record_mse,
"arithmetic_mean": arithmetic_mean_mse,
}
rmse = {
"last_month": last_month_rmse,
"last_record": last_record_rmse,
"arithmetic_mean": arithmetic_mean_rmse,
}
print("MAPE:-", mape, "\n")
print("MSE:-", mse, "\n")
print("RMSE:-", rmse, "\n")
def plot_bar_graph(plot_dict, ylabel):
import matplotlib.pyplot as plt
import numpy as np
# Data for the bar graph
x = plot_dict.keys()
y = plot_dict.values()
# Create the bar graph
plt.bar(x, y)
# Set the labels and title
plt.xlabel("BaseLine")
plt.ylabel(ylabel.upper())
plt.title("Bar Graph")
# Show the plot
plt.show()
plot_bar_graph(mape, "mape")
plot_bar_graph(mse, "mse")
plot_bar_graph(rmse, "rmse")
def shif_by_one_day(train, test):
test_df = test.copy()
test_df["Close"] = test_df["Close"].shift(1)
test_df.loc[test_df["Close"].index[0], "Close"] = train.loc[
train["Close"].index[-1], "Close"
]
return test_df
shif_by_one_day_df = shif_by_one_day(train, test)
last_month_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], last_month_df["Close"]
)
last_month_mse = mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"])
last_month_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"])
)
last_record_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], last_record_df["Close"]
)
last_record_mse = mean_squared_error(
shif_by_one_day_df["Close"], last_record_df["Close"]
)
last_record_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], last_record_df["Close"])
)
arithmetic_mean_mape = mean_absolute_percentage_error(
shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_mse = mean_squared_error(
shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]
)
arithmetic_mean_rmse = math.sqrt(
mean_squared_error(shif_by_one_day_df["Close"], arithmetic_mean_df["Close"])
)
mape = {
"last_month": last_month_mape,
"last_record": last_record_mape,
"arithmetic_mean": arithmetic_mean_mape,
}
mse = {
"last_month": last_month_mse,
"last_record": last_record_mse,
"arithmetic_mean": arithmetic_mean_mse,
}
rmse = {
"last_month": last_month_rmse,
"last_record": last_record_rmse,
"arithmetic_mean": arithmetic_mean_rmse,
}
print("MAPE:-", mape, "\n")
print("MSE:-", mse, "\n")
print("RMSE:-", rmse, "\n")
def plot_bar_graph(plot_dict, ylabel):
import matplotlib.pyplot as plt
import numpy as np
# Data for the bar graph
x = plot_dict.keys()
y = plot_dict.values()
# Create the bar graph
plt.bar(x, y)
# Set the labels and title
plt.xlabel("BaseLine")
plt.ylabel(ylabel.upper())
plt.title("Bar Graph")
# Show the plot
plt.show()
plot_bar_graph(mape, "mape")
plot_bar_graph(mse, "mse")
plot_bar_graph(rmse, "rmse")
| false | 1 | 2,120 | 0 | 2,148 | 2,120 |
||
129879123
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# library
import os
from datetime import datetime
import time
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import seaborn as sns
import math
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.seasonal import seasonal_decompose
from scipy import stats
from itertools import product
import warnings
warnings.filterwarnings("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
info = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv"
)
ctrain = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/monthly_adjusted_IBM.csv"
)
intraday = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/intraday_5min_IBM.csv"
)
daily = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv"
)
sns.color_palette("YlOrRd", 10)
info.head
info.columns
import plotly.express as px
# The daily IBM frame loaded above ('info') has the columns
# ['timestamp', 'open', 'high', 'low', 'close', 'adjusted_close', 'volume',
#  'dividend_amount', 'split_coefficient']; plot its trading volume over time.
x_column = "timestamp"
y_column = "volume"
fig_index = px.bar(
    info,
    x=x_column,
    y=y_column,
    color=y_column,
    title="IBM Daily Trading Volume",
    color_continuous_scale=px.colors.sequential.YlOrRd,
)
fig_index.show()
ctrain.columns
import pandas as pd
import requests
api_key = "V6M4VISDAY5MIIHY" # Replace with your actual API key
symbol = "AAPL" # Replace with the stock symbol you want to retrieve data for
start_date = "2021-01-01" # Start date of the data range
end_date = "2021-09-21" # End date of the data range
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&outputsize=full&apikey={api_key}"
response = requests.get(url)
data = response.json()
# Convert the JSON response to a DataFrame
df = pd.DataFrame(data["Time Series (Daily)"]).T
df.index = pd.to_datetime(df.index)
df = df.loc[start_date:end_date]
# Perform further data processing or analysis as needed
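# Added sketch (assumes the request above succeeded and returned the usual
# "Time Series (Daily)" payload): the JSON values arrive as strings, so cast the columns
# to numeric, sort chronologically, and derive a simple daily return from "4. close".
df = df.apply(pd.to_numeric, errors="coerce").sort_index()
df["daily_return"] = df["4. close"].pct_change()
print(df[["4. close", "daily_return"]].head())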
import time
from datetime import datetime
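# Note on the block below: it appears adapted from a multi-asset (crypto) correlation
# template. It zips an "asset id" and "asset name" out of the IBM daily frame and filters
# the monthly frame on volume == asset_id, so with this single-symbol dataset the filter
# generally matches nothing and the resulting correlation matrix ends up empty.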
volume_column = "volume"
asset_id_column = "timestamp"
asset_name_column = "high" # Update the column name here
totimestamp = lambda s: np.int32(
time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
)
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
all2021 = pd.DataFrame([])
for asset_id, asset_name in zip(info[asset_id_column], info[asset_name_column]):
asset = ctrain[ctrain[volume_column] == asset_id].set_index("timestamp")
if asset.empty:
continue
asset = asset.reindex(range(asset.index[0], asset.index[-1] + 60, 60), method="pad")
lret = log_return(asset["close"].fillna(0))[1:]
all2021 = all2021.join(lret, rsuffix=asset_name, how="outer")
plt.figure(figsize=(15, 10)) # Adjust the figure size as needed
plt.imshow(all2021.corr())
plt.yticks(
range(len(info[volume_column])), info[asset_name_column].values, fontsize=8
) # Increase font size
plt.xticks(
range(len(info[volume_column])),
info[asset_name_column].values,
rotation="vertical",
fontsize=8,
) # Increase font size
plt.colorbar(cmap="coolwarm")
plt.show()
daily.columns
# Create time interval for 2021
def dur(start, end, data):
df = data.loc[totimestamp(start) : totimestamp(end)]
return df
info2021 = dur(start="01/01/2021", end="21/09/2021", data=info)
ctrain2021 = dur(start="01/01/2021", end="21/09/2021", data=daily)
daily = dur(start="01/01/2021", end="21/09/2021", data=ctrain)
import requests
import json
api_key = "V6M4VISDAY5MIIHY"
symbol = "BTCUSD" # Replace with the desired cryptocurrency symbol (e.g., BTCUSD, ETHUSD, etc.)
interval = "5min"
url = f"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY_EXTENDED&symbol={symbol}&market=USD&interval={interval}&apikey={api_key}"
response = requests.get(url)
import requests
import json
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo"
response = requests.get(url)
data = response.json()
time_series_key = "Time Series (Daily)"
if time_series_key not in data:
print("Unable to find time series data in the API response.")
else:
time_series = data[time_series_key]
prices = [float(entry["4. close"]) for entry in time_series.values()]
highest_price = max(prices)
lowest_price = min(prices)
print("Highest price:", highest_price)
print("Lowest price:", lowest_price)
import requests
import json
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from datetime import datetime
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo"
response = requests.get(url)
data = response.json()
time_series_key = "Time Series (Daily)"
if time_series_key not in data:
print("Unable to find time series data in the API response.")
else:
time_series = data[time_series_key]
# Extract data points and dates within the desired date range
dates = []
high_values = []
low_values = []
today = datetime.now().date()
year_2020 = datetime(2020, 1, 1).date()
for date, entry in time_series.items():
date_obj = datetime.strptime(date, "%Y-%m-%d").date()
if year_2020 <= date_obj <= today:
dates.append(date)
high_values.append(float(entry["2. high"]))
low_values.append(float(entry["3. low"]))
# Reverse the lists to get the oldest dates first
dates = dates[::-1]
high_values = high_values[::-1]
low_values = low_values[::-1]
# Plotting the graph
plt.figure(figsize=(12, 6))
plt.plot(dates, high_values, label="High")
plt.plot(dates, low_values, label="Low")
plt.xlabel("Date")
plt.ylabel("Price")
plt.title("High and Low Prices of IBM Stock")
plt.xticks(rotation=45)
# Specify the date interval for x-axis ticks
ax = plt.gca()
date_format = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.xaxis.set_major_formatter(date_format)
plt.legend()
plt.tight_layout()
plt.show()
# Define the symbols for the stocks
symbols = ["IBM", "TSLA", "MSFT"]
# Initialize the data dictionary to store the stock data for each symbol
data = {}
# Fetch the data for each symbol
for symbol in symbols:
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&apikey=demo"
response = requests.get(url)
data[symbol] = response.json()
# Plotting the graph
fig, ax1 = plt.subplots(figsize=(12, 6))
# Iterate over each symbol and plot the high, low, close prices, and volumes
for symbol, stock_data in data.items():
time_series_key = "Time Series (Daily)"
if time_series_key not in stock_data:
print(
f"Unable to find time series data for symbol {symbol} in the API response."
)
else:
time_series = stock_data[time_series_key]
# Extract data points and dates within the desired date range
dates = []
high_values = []
low_values = []
close_values = []
volumes = []
for date, entry in time_series.items():
date_obj = datetime.strptime(date, "%Y-%m-%d").date()
dates.append(date_obj)
high_values.append(float(entry["2. high"]))
low_values.append(float(entry["3. low"]))
close_values.append(float(entry["4. close"]))
volumes.append(float(entry["6. volume"]))
# Reverse the lists to get the oldest dates first
dates = dates[::-1]
high_values = high_values[::-1]
low_values = low_values[::-1]
close_values = close_values[::-1]
volumes = volumes[::-1]
# Plot the high, low, close prices on the left y-axis
ax1.plot(
dates, high_values, label=f"{symbol} High", color="red", linestyle="--"
)
ax1.plot(
dates, low_values, label=f"{symbol} Low", color="green", linestyle="--"
)
ax1.plot(dates, close_values, label=f"{symbol} Close", color="blue")
# Plot the volume on the right y-axis
ax2 = ax1.twinx()
ax2.plot(dates, volumes, label=f"{symbol} Volume", color="orange")
# Set the axis labels, title, and legends
ax1.set_xlabel("Date")
ax1.set_ylabel("Price")
ax1.set_title("Stock Prices")
ax1.legend(loc="upper left")
ax1.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
ax2.set_ylabel("Volume")
ax2.legend(loc="upper right")
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/879/129879123.ipynb
| null | null |
[{"Id": 129879123, "ScriptId": 38594640, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7088409, "CreationDate": "05/17/2023 06:36:38", "VersionNumber": 1.0, "Title": "notebookd0d90ef890", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 305.0, "LinesInsertedFromPrevious": 305.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# library
import os
from datetime import datetime
import time
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import seaborn as sns
import math
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.seasonal import seasonal_decompose
from scipy import stats
from itertools import product
import warnings
warnings.filterwarnings("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
info = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv"
)
ctrain = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/monthly_adjusted_IBM.csv"
)
intraday = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/intraday_5min_IBM.csv"
)
daily = pd.read_csv(
"/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv"
)
sns.color_palette("YlOrRd", 10)
info.head
info.columns
import plotly.express as px
# The daily IBM frame loaded above ('info') has the columns
# ['timestamp', 'open', 'high', 'low', 'close', 'adjusted_close', 'volume',
#  'dividend_amount', 'split_coefficient']; plot its trading volume over time.
x_column = "timestamp"
y_column = "volume"
fig_index = px.bar(
    info,
    x=x_column,
    y=y_column,
    color=y_column,
    title="IBM Daily Trading Volume",
    color_continuous_scale=px.colors.sequential.YlOrRd,
)
fig_index.show()
ctrain.columns
import pandas as pd
import requests
api_key = "V6M4VISDAY5MIIHY" # Replace with your actual API key
symbol = "AAPL" # Replace with the stock symbol you want to retrieve data for
start_date = "2021-01-01" # Start date of the data range
end_date = "2021-09-21" # End date of the data range
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&outputsize=full&apikey={api_key}"
response = requests.get(url)
data = response.json()
# Convert the JSON response to a DataFrame
df = pd.DataFrame(data["Time Series (Daily)"]).T
df.index = pd.to_datetime(df.index)
df = df.loc[start_date:end_date]
# Perform further data processing or analysis as needed
import time
from datetime import datetime
volume_column = "volume"
asset_id_column = "timestamp"
asset_name_column = "high" # Update the column name here
totimestamp = lambda s: np.int32(
time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
)
def log_return(series, periods=1):
return np.log(series).diff(periods=periods)
all2021 = pd.DataFrame([])
for asset_id, asset_name in zip(info[asset_id_column], info[asset_name_column]):
asset = ctrain[ctrain[volume_column] == asset_id].set_index("timestamp")
if asset.empty:
continue
asset = asset.reindex(range(asset.index[0], asset.index[-1] + 60, 60), method="pad")
lret = log_return(asset["close"].fillna(0))[1:]
all2021 = all2021.join(lret, rsuffix=asset_name, how="outer")
plt.figure(figsize=(15, 10)) # Adjust the figure size as needed
plt.imshow(all2021.corr())
plt.yticks(
range(len(info[volume_column])), info[asset_name_column].values, fontsize=8
) # Increase font size
plt.xticks(
range(len(info[volume_column])),
info[asset_name_column].values,
rotation="vertical",
fontsize=8,
) # Increase font size
plt.colorbar(cmap="coolwarm")
plt.show()
daily.columns
# Create time interval for 2021
def dur(start, end, data):
df = data.loc[totimestamp(start) : totimestamp(end)]
return df
info2021 = dur(start="01/01/2021", end="21/09/2021", data=info)
ctrain2021 = dur(start="01/01/2021", end="21/09/2021", data=daily)
daily = dur(start="01/01/2021", end="21/09/2021", data=ctrain)
import requests
import json
api_key = "V6M4VISDAY5MIIHY"
symbol = "BTCUSD" # Replace with the desired cryptocurrency symbol (e.g., BTCUSD, ETHUSD, etc.)
interval = "5min"
url = f"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY_EXTENDED&symbol={symbol}&market=USD&interval={interval}&apikey={api_key}"
response = requests.get(url)
import requests
import json
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo"
response = requests.get(url)
data = response.json()
time_series_key = "Time Series (Daily)"
if time_series_key not in data:
print("Unable to find time series data in the API response.")
else:
time_series = data[time_series_key]
prices = [float(entry["4. close"]) for entry in time_series.values()]
highest_price = max(prices)
lowest_price = min(prices)
print("Highest price:", highest_price)
print("Lowest price:", lowest_price)
import requests
import json
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from datetime import datetime
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo"
response = requests.get(url)
data = response.json()
time_series_key = "Time Series (Daily)"
if time_series_key not in data:
print("Unable to find time series data in the API response.")
else:
time_series = data[time_series_key]
# Extract data points and dates within the desired date range
dates = []
high_values = []
low_values = []
today = datetime.now().date()
year_2020 = datetime(2020, 1, 1).date()
for date, entry in time_series.items():
date_obj = datetime.strptime(date, "%Y-%m-%d").date()
if year_2020 <= date_obj <= today:
dates.append(date)
high_values.append(float(entry["2. high"]))
low_values.append(float(entry["3. low"]))
# Reverse the lists to get the oldest dates first
dates = dates[::-1]
high_values = high_values[::-1]
low_values = low_values[::-1]
# Plotting the graph
plt.figure(figsize=(12, 6))
plt.plot(dates, high_values, label="High")
plt.plot(dates, low_values, label="Low")
plt.xlabel("Date")
plt.ylabel("Price")
plt.title("High and Low Prices of IBM Stock")
plt.xticks(rotation=45)
# Specify the date interval for x-axis ticks
ax = plt.gca()
date_format = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.xaxis.set_major_formatter(date_format)
plt.legend()
plt.tight_layout()
plt.show()
# Define the symbols for the stocks
symbols = ["IBM", "TSLA", "MSFT"]
# Initialize the data dictionary to store the stock data for each symbol
data = {}
# Fetch the data for each symbol
for symbol in symbols:
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&apikey=demo"
response = requests.get(url)
data[symbol] = response.json()
# Plotting the graph
fig, ax1 = plt.subplots(figsize=(12, 6))
# Iterate over each symbol and plot the high, low, close prices, and volumes
for symbol, stock_data in data.items():
time_series_key = "Time Series (Daily)"
if time_series_key not in stock_data:
print(
f"Unable to find time series data for symbol {symbol} in the API response."
)
else:
time_series = stock_data[time_series_key]
# Extract data points and dates within the desired date range
dates = []
high_values = []
low_values = []
close_values = []
volumes = []
for date, entry in time_series.items():
date_obj = datetime.strptime(date, "%Y-%m-%d").date()
dates.append(date_obj)
high_values.append(float(entry["2. high"]))
low_values.append(float(entry["3. low"]))
close_values.append(float(entry["4. close"]))
volumes.append(float(entry["6. volume"]))
# Reverse the lists to get the oldest dates first
dates = dates[::-1]
high_values = high_values[::-1]
low_values = low_values[::-1]
close_values = close_values[::-1]
volumes = volumes[::-1]
# Plot the high, low, close prices on the left y-axis
ax1.plot(
dates, high_values, label=f"{symbol} High", color="red", linestyle="--"
)
ax1.plot(
dates, low_values, label=f"{symbol} Low", color="green", linestyle="--"
)
ax1.plot(dates, close_values, label=f"{symbol} Close", color="blue")
# Plot the volume on the right y-axis
ax2 = ax1.twinx()
ax2.plot(dates, volumes, label=f"{symbol} Volume", color="orange")
# Set the axis labels, title, and legends
ax1.set_xlabel("Date")
ax1.set_ylabel("Price")
ax1.set_title("Stock Prices")
ax1.legend(loc="upper left")
ax1.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
ax2.set_ylabel("Volume")
ax2.legend(loc="upper right")
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()
| false | 0 | 3,089 | 0 | 3,089 | 3,089 |
||
129169438
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 15:18:50 2023
@author: 2687492Z
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
concatenate,
Flatten,
Dense,
Dropout,
AveragePooling2D,
GlobalAveragePooling2D,
Reshape,
Lambda,
minimum,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.layers import Input, DepthwiseConv2D, UpSampling2D
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.callbacks import ReduceLROnPlateau
def lambda_1(inputs):
    # Squared ratio of a difference to a scale tensor: ((a - b) / c) ** 2
    subtracted_12, output_3 = inputs
    return tf.math.square(subtracted_12 / output_3)
def lambda_2(inputs):
    # Base-10 logarithm via change of base, since tf.math.log is the natural log
    return tf.math.log(inputs) / tf.math.log(10.0)
inputs_1 = tf.keras.layers.Input(shape=[1, 1])
output_1 = tf.keras.layers.Dense(32, "relu")(inputs_1)
inputs_2 = tf.keras.layers.Input(shape=[1, 1])
output_2 = tf.keras.layers.Dense(32, "relu")(inputs_2)
inputs_3 = tf.keras.layers.Input(shape=[1, 1])
output_3 = tf.keras.layers.Dense(32, "relu")(inputs_3)
inputs_4 = tf.keras.layers.Input(shape=[1, 1])
output_4 = tf.keras.layers.Dense(32, "relu")(inputs_4)
subtracted_12 = tf.keras.layers.subtract([output_1, output_2])
lambda_13 = tf.keras.layers.Lambda(lambda_1)([subtracted_12, output_3])
output_13 = tf.keras.layers.Dense(32, "relu")(lambda_13)
minimum_14 = tf.keras.layers.minimum([output_13, output_4])
inputs_5 = tf.keras.layers.Input(shape=[1, 1])
output_5 = tf.keras.layers.Dense(32, "relu")(inputs_5)
inputs_6 = tf.keras.layers.Input(shape=[1, 1])
output_6 = tf.keras.layers.Dense(32, "relu")(inputs_6)
inputs_7 = tf.keras.layers.Input(shape=[1, 1])
output_7 = tf.keras.layers.Dense(32, "relu")(inputs_7)
inputs_8 = tf.keras.layers.Input(shape=[1, 1])
output_8 = tf.keras.layers.Dense(32, "relu")(inputs_8)
subtracted_56 = tf.keras.layers.subtract([output_5, output_6])
lambda_57 = tf.keras.layers.Lambda(lambda_1)([subtracted_56, output_7])
output_57 = tf.keras.layers.Dense(32, "relu")(lambda_57)
minimum_58 = tf.keras.layers.minimum([output_57, output_8])
output_18 = tf.keras.layers.Dense(32, "relu")(
tf.keras.layers.concatenate([minimum_14, minimum_58])
)
inputs_9 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_9 = tf.keras.layers.Lambda(lambda_2)(inputs_9)
output_9 = tf.keras.layers.Dense(32, "relu")(lambda_9)
inputs_10 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_10 = tf.keras.layers.Lambda(lambda_2)(inputs_10)
output_10 = tf.keras.layers.Dense(32, "relu")(lambda_10)
add_910 = tf.keras.layers.add([output_9, output_10])
inputs_11 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_11 = tf.keras.layers.Lambda(lambda_2)(inputs_11)
output_11 = tf.keras.layers.Dense(32, "relu")(lambda_11)
output = tf.keras.layers.add([output_18, add_910, output_11])
output = tf.keras.layers.Dense(1, activation="linear")(output)
model = tf.keras.Model(
inputs=[
inputs_1,
inputs_2,
inputs_3,
inputs_4,
inputs_5,
inputs_6,
inputs_7,
inputs_8,
inputs_9,
inputs_10,
inputs_11,
],
outputs=output,
)
tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
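# Added sketch (not part of the original script): the model above is only built and
# plotted, never compiled or trained. The example below compiles it and fits one epoch on
# random placeholder data just to show the expected input structure — eleven arrays of
# shape (batch, 1, 1). Note that lambda_1 divides by a ReLU output, so inf/nan losses can
# appear; guarding against that is left out here.
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3), loss="mse", metrics=["mae"])
dummy_inputs = [(np.random.rand(8, 1, 1) + 0.1).astype("float32") for _ in range(11)]
dummy_target = np.random.rand(8, 1, 1).astype("float32")
model.fit(dummy_inputs, dummy_target, epochs=1, verbose=0)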
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/169/129169438.ipynb
| null | null |
[{"Id": 129169438, "ScriptId": 38361088, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8394130, "CreationDate": "05/11/2023 13:59:26", "VersionNumber": 1.0, "Title": "NN.py", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 115.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 15:18:50 2023
@author: 2687492Z
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
concatenate,
Flatten,
Dense,
Dropout,
AveragePooling2D,
GlobalAveragePooling2D,
Reshape,
Lambda,
minimum,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.layers import Input, DepthwiseConv2D, UpSampling2D
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.callbacks import ReduceLROnPlateau
def lambda_1(inputs):
subtracted_12, output_3 = inputs
return tf.math.square(subtracted_12 / output_3)
def lambda_2(inputs):
return tf.math.log(inputs) / tf.math.log(10.0)
inputs_1 = tf.keras.layers.Input(shape=[1, 1])
output_1 = tf.keras.layers.Dense(32, "relu")(inputs_1)
inputs_2 = tf.keras.layers.Input(shape=[1, 1])
output_2 = tf.keras.layers.Dense(32, "relu")(inputs_2)
inputs_3 = tf.keras.layers.Input(shape=[1, 1])
output_3 = tf.keras.layers.Dense(32, "relu")(inputs_3)
inputs_4 = tf.keras.layers.Input(shape=[1, 1])
output_4 = tf.keras.layers.Dense(32, "relu")(inputs_4)
subtracted_12 = tf.keras.layers.subtract([output_1, output_2])
lambda_13 = tf.keras.layers.Lambda(lambda_1)([subtracted_12, output_3])
output_13 = tf.keras.layers.Dense(32, "relu")(lambda_13)
minimum_14 = tf.keras.layers.minimum([output_13, output_4])
inputs_5 = tf.keras.layers.Input(shape=[1, 1])
output_5 = tf.keras.layers.Dense(32, "relu")(inputs_5)
inputs_6 = tf.keras.layers.Input(shape=[1, 1])
output_6 = tf.keras.layers.Dense(32, "relu")(inputs_6)
inputs_7 = tf.keras.layers.Input(shape=[1, 1])
output_7 = tf.keras.layers.Dense(32, "relu")(inputs_7)
inputs_8 = tf.keras.layers.Input(shape=[1, 1])
output_8 = tf.keras.layers.Dense(32, "relu")(inputs_8)
subtracted_56 = tf.keras.layers.subtract([output_5, output_6])
lambda_57 = tf.keras.layers.Lambda(lambda_1)([subtracted_56, output_7])
output_57 = tf.keras.layers.Dense(32, "relu")(lambda_57)
minimum_58 = tf.keras.layers.minimum([output_57, output_8])
output_18 = tf.keras.layers.Dense(32, "relu")(
tf.keras.layers.concatenate([minimum_14, minimum_58])
)
inputs_9 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_9 = tf.keras.layers.Lambda(lambda_2)(inputs_9)
output_9 = tf.keras.layers.Dense(32, "relu")(lambda_9)
inputs_10 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_10 = tf.keras.layers.Lambda(lambda_2)(inputs_10)
output_10 = tf.keras.layers.Dense(32, "relu")(lambda_10)
add_910 = tf.keras.layers.add([output_9, output_10])
inputs_11 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32)
lambda_11 = tf.keras.layers.Lambda(lambda_2)(inputs_11)
output_11 = tf.keras.layers.Dense(32, "relu")(lambda_11)
output = tf.keras.layers.add([output_18, add_910, output_11])
output = tf.keras.layers.Dense(1, activation="linear")(output)
model = tf.keras.Model(
inputs=[
inputs_1,
inputs_2,
inputs_3,
inputs_4,
inputs_5,
inputs_6,
inputs_7,
inputs_8,
inputs_9,
inputs_10,
inputs_11,
],
outputs=output,
)
tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
| false | 0 | 1,488 | 0 | 1,488 | 1,488 |
||
129169846
|
<jupyter_start><jupyter_text>net__data_isha
Kaggle dataset identifier: net-data-isha
<jupyter_script>from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Flatten,
Dropout,
BatchNormalization,
Conv2D,
MaxPooling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
train_path = "/kaggle/input/net-data-isha/Data/train"
valid_path = "/kaggle/input/net-data-isha/Data/valid"
test_path = "/kaggle/input/net-data-isha/Data/test"
# re-size all the images to this
IMAGE_SIZE = [224, 224]
N_CLASSES = 5
BATCH_SIZE = 32
vgg16 = VGG16(input_shape=IMAGE_SIZE + [3], weights="imagenet", include_top=False)
# don't train existing weights
for layer in vgg16.layers:
layer.trainable = False
# useful for getting number of output classes
folders = glob("/kaggle/input/net-data-isha/Data/train/*")
print(len(folders))
folders
# Add the classification layers
x = Flatten()(vgg16.output)
x = Dense(N_CLASSES, activation="softmax")(x)
# Create a new model with the VGG16 base and the new top layers
model = Model(inputs=vgg16.input, outputs=x)
# Print the model summary
model.summary()
# compile the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
train_datagen = ImageDataGenerator(dtype="float32")
train_generator = train_datagen.flow_from_directory(
train_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
valid_datagen = ImageDataGenerator(dtype="float32")
valid_generator = valid_datagen.flow_from_directory(
valid_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
test_datagen = ImageDataGenerator(dtype="float32")
test_generator = test_datagen.flow_from_directory(
test_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
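# Added sketch (optional, not wired into the fit call below): ModelCheckpoint and
# EarlyStopping are imported above but never used. One hedged way to use them is to build
# the callbacks here and pass callbacks=callbacks_list to model.fit.
checkpoint = ModelCheckpoint(
    "best-VGG16.hdf5", monitor="val_accuracy", save_best_only=True, verbose=1
)
early_stop = EarlyStopping(monitor="val_loss", patience=5, restore_best_weights=True)
callbacks_list = [checkpoint, early_stop]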
# Train the model
history = model.fit(
train_generator,
steps_per_epoch=len(train_generator),
epochs=56,
verbose=1,
validation_data=valid_generator,
validation_steps=len(valid_generator),
)
# Evaluate the model on the test set
result = model.evaluate(test_generator)
print("Test loss: {:.2f}, Test accuracy: {:.2f}%".format(result[0], result[1] * 100))
# Save the model
model.save("finalmodel-VGG16.hdf5")
# Load the saved model
from tensorflow.keras.models import load_model
model = load_model("finalmodel-VGG16.hdf5")
# Define the classes in the same order as the training generator's label indices
classes = list(train_generator.class_indices.keys())
# Load the test image and preprocess it
from tensorflow.keras.preprocessing.image import load_img, img_to_array
img_path = "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/000108 (3).png"
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x = preprocess_input(x)
# Make the prediction
preds = model.predict(x)
class_idx = np.argmax(preds)
class_name = classes[class_idx]
print("Predicted class:", class_name)
# Save the model
model.save("finalmodel-VGG16.hdf5")
import tensorflow as tf
import numpy as np
img_path = "/kaggle/input/chest-ctscan-images/Data/test/normal/12 (2).png"
class_names = list(test_generator.class_indices.keys())
# Load the pre-trained VGG16 model
vgg16_model = tf.keras.applications.vgg16.VGG16(weights="imagenet")
# Preprocess the image
img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224, 224))
img_array = tf.keras.preprocessing.image.img_to_array(img)
img_array = tf.keras.applications.vgg16.preprocess_input(img_array)
img_array = tf.expand_dims(img_array, 0)
# Load the class names
class_names = list(
test_generator.class_indices.keys()
) # Replace with the actual class names
# Make predictions on the input image
prediction = model.predict(img_array)
# Get the predicted class index
predicted_class_index = np.argmax(prediction)
# Print the predicted class and confidence
predicted_class_name = class_names[predicted_class_index]
confidence = 100 * prediction[0][predicted_class_index]
print(
"This image most likely belongs to {} with a {:.2f} percent confidence.".format(
class_names[np.argmax(prediction)], 100 * np.max(prediction)
)
)
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Flatten,
Dropout,
BatchNormalization,
Conv2D,
MaxPooling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.applications import ResNet50, DenseNet201
from tensorflow.keras.applications import resnet, densenet
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import pandas as pd
import tensorflow as tf
paths = [
"/kaggle/input/net-data-isha/Data/test/adenocarcinoma/",
"/kaggle/input/net-data-isha/Data/test/covid19/",
"/kaggle/input/net-data-isha/Data/test/large.cell.carcinoma/",
"/kaggle/input/net-data-isha/Data/test/normal/",
"/kaggle/input/net-data-isha/Data/test/squamous.cell.carcinoma/",
]
results = {
"adeno": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"covid19": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"large": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"normal": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"squamous": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
}
for path, key in zip(paths, results.keys()):
for file in os.listdir(path):
img = tf.keras.utils.load_img((path + file), target_size=(224, 224))
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)
prediction = model.predict(img_array, verbose=0)
results[key][np.argmax(prediction)] = (
results.get(key).get(np.argmax(prediction), 0) + 1
)
results
df = pd.DataFrame(results)
print(
"Overall accuracy is : {:.2f}%\n".format(
(
df["adeno"][0]
+ df["covid19"][1]
+ df["large"][2]
+ df["normal"][3]
+ df["squamous"][4]
)
/ 714
* 100
)
)
print(
"Adeno cancer detection accuracy is : {:.2f}%".format(
df["adeno"][0] / df["adeno"].sum() * 100
)
)
print(
"covid19 cancer detection accuracy is : {:.2f}%".format(
df["covid19"][1] / df["covid19"].sum() * 100
)
)
print(
"Large cell cancer detection accuracy is : {:.2f}%".format(
df["large"][2] / df["large"].sum() * 100
)
)
print(
"Normal chest detection accuracy is : {:.2f}%".format(
df["normal"][3] / df["normal"].sum() * 100
)
)
print(
"Squamous cell cancer detection accuracy is : {:.2f}%".format(
df["squamous"][4] / df["squamous"].sum() * 100
)
)
print("\nConfusion Matrix :")
df.transpose()
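# Added sketch (an alternative cross-check, not part of the original evaluation): sklearn
# can compute the same per-class numbers from a non-shuffled generator; shuffle=False is
# what keeps the predictions aligned with eval_generator.classes.
from sklearn.metrics import classification_report, confusion_matrix
eval_generator = test_datagen.flow_from_directory(
    test_path,
    batch_size=BATCH_SIZE,
    target_size=IMAGE_SIZE[:2],
    class_mode="categorical",
    shuffle=False,
)
y_pred = np.argmax(model.predict(eval_generator, verbose=0), axis=1)
print(confusion_matrix(eval_generator.classes, y_pred))
print(
    classification_report(
        eval_generator.classes,
        y_pred,
        target_names=list(eval_generator.class_indices.keys()),
    )
)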
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/169/129169846.ipynb
|
net-data-isha
|
sheetallamani
|
[{"Id": 129169846, "ScriptId": 36363335, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12386091, "CreationDate": "05/11/2023 14:02:43", "VersionNumber": 1.0, "Title": "vgg16work2", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 226.0, "LinesInsertedFromPrevious": 226.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184980155, "KernelVersionId": 129169846, "SourceDatasetVersionId": 5200313}]
|
[{"Id": 5200313, "DatasetId": 3024128, "DatasourceVersionId": 5272580, "CreatorUserId": 12386091, "LicenseName": "Unknown", "CreationDate": "03/20/2023 11:09:22", "VersionNumber": 1.0, "Title": "net__data_isha", "Slug": "net-data-isha", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3024128, "CreatorUserId": 12386091, "OwnerUserId": 12386091.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5200313.0, "CurrentDatasourceVersionId": 5272580.0, "ForumId": 3063369, "Type": 2, "CreationDate": "03/20/2023 11:09:22", "LastActivityDate": "03/20/2023", "TotalViews": 45, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 0}]
|
[{"Id": 12386091, "UserName": "sheetallamani", "DisplayName": "Sheetal Lamani", "RegisterDate": "11/13/2022", "PerformanceTier": 0}]
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Flatten,
Dropout,
BatchNormalization,
Conv2D,
MaxPooling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
train_path = "/kaggle/input/net-data-isha/Data/train"
valid_path = "/kaggle/input/net-data-isha/Data/valid"
test_path = "/kaggle/input/net-data-isha/Data/test"
# re-size all the images to this
IMAGE_SIZE = [224, 224]
N_CLASSES = 5
BATCH_SIZE = 32
vgg16 = VGG16(input_shape=IMAGE_SIZE + [3], weights="imagenet", include_top=False)
# don't train existing weights
for layer in vgg16.layers:
layer.trainable = False
# useful for getting number of output classes
folders = glob("/kaggle/input/net-data-isha/Data/train/*")
print(len(folders))
folders
# Add the classification layers
x = Flatten()(vgg16.output)
x = Dense(N_CLASSES, activation="softmax")(x)
# Create a new model with the VGG16 base and the new top layers
model = Model(inputs=vgg16.input, outputs=x)
# Print the model summary
model.summary()
# compile the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
train_datagen = ImageDataGenerator(dtype="float32")
train_generator = train_datagen.flow_from_directory(
train_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
valid_datagen = ImageDataGenerator(dtype="float32")
valid_generator = valid_datagen.flow_from_directory(
valid_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
test_datagen = ImageDataGenerator(dtype="float32")
test_generator = test_datagen.flow_from_directory(
test_path,
batch_size=BATCH_SIZE,
target_size=IMAGE_SIZE[:2],
class_mode="categorical",
)
# Train the model
history = model.fit(
train_generator,
steps_per_epoch=len(train_generator),
epochs=56,
verbose=1,
validation_data=valid_generator,
validation_steps=len(valid_generator),
)
# Evaluate the model on the test set
result = model.evaluate(test_generator)
print("Test loss: {:.2f}, Test accuracy: {:.2f}%".format(result[0], result[1] * 100))
# Save the model
model.save("finalmodel-VGG16.hdf5")
# Load the saved model
from tensorflow.keras.models import load_model
model = load_model("finalmodel-VGG16.hdf5")
# Define the classes in the same order as the training generator's label indices
classes = list(train_generator.class_indices.keys())
# Load the test image and preprocess it
from tensorflow.keras.preprocessing.image import load_img, img_to_array
img_path = "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/000108 (3).png"
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x = preprocess_input(x)
# Make the prediction
preds = model.predict(x)
class_idx = np.argmax(preds)
class_name = classes[class_idx]
print("Predicted class:", class_name)
# Save the model
model.save("finalmodel-VGG16.hdf5")
import tensorflow as tf
import numpy as np
img_path = "/kaggle/input/chest-ctscan-images/Data/test/normal/12 (2).png"
class_names = list(test_generator.class_indices.keys())
# Load the pre-trained VGG16 model
vgg16_model = tf.keras.applications.vgg16.VGG16(weights="imagenet")
# Preprocess the image
img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224, 224))
img_array = tf.keras.preprocessing.image.img_to_array(img)
img_array = tf.keras.applications.vgg16.preprocess_input(img_array)
img_array = tf.expand_dims(img_array, 0)
# Load the class names
class_names = list(
test_generator.class_indices.keys()
) # Replace with the actual class names
# Make predictions on the input image
prediction = model.predict(img_array)
# Get the predicted class index
predicted_class_index = np.argmax(prediction)
# Print the predicted class and confidence
predicted_class_name = class_names[predicted_class_index]
confidence = 100 * prediction[0][predicted_class_index]
print(
"This image most likely belongs to {} with a {:.2f} percent confidence.".format(
class_names[np.argmax(prediction)], 100 * np.max(prediction)
)
)
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Flatten,
Dropout,
BatchNormalization,
Conv2D,
MaxPooling2D,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.applications import ResNet50, DenseNet201
from tensorflow.keras.applications import resnet, densenet
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import pandas as pd
import tensorflow as tf
paths = [
"/kaggle/input/net-data-isha/Data/test/adenocarcinoma/",
"/kaggle/input/net-data-isha/Data/test/covid19/",
"/kaggle/input/net-data-isha/Data/test/large.cell.carcinoma/",
"/kaggle/input/net-data-isha/Data/test/normal/",
"/kaggle/input/net-data-isha/Data/test/squamous.cell.carcinoma/",
]
results = {
"adeno": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"covid19": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"large": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"normal": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
"squamous": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0},
}
for path, key in zip(paths, results.keys()):
for file in os.listdir(path):
img = tf.keras.utils.load_img((path + file), target_size=(224, 224))
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)
prediction = model.predict(img_array, verbose=0)
results[key][np.argmax(prediction)] = (
results.get(key).get(np.argmax(prediction), 0) + 1
)
results
df = pd.DataFrame(results)
print(
"Overall accuracy is : {:.2f}%\n".format(
(
df["adeno"][0]
+ df["covid19"][1]
+ df["large"][2]
+ df["normal"][3]
+ df["squamous"][4]
)
/ 714
* 100
)
)
print(
"Adeno cancer detection accuracy is : {:.2f}%".format(
df["adeno"][0] / df["adeno"].sum() * 100
)
)
print(
"covid19 cancer detection accuracy is : {:.2f}%".format(
df["covid19"][1] / df["covid19"].sum() * 100
)
)
print(
"Large cell cancer detection accuracy is : {:.2f}%".format(
df["large"][2] / df["large"].sum() * 100
)
)
print(
"Normal chest detection accuracy is : {:.2f}%".format(
df["normal"][3] / df["normal"].sum() * 100
)
)
print(
"Squamous cell cancer detection accuracy is : {:.2f}%".format(
df["squamous"][4] / df["squamous"].sum() * 100
)
)
print("\nConfusion Matrix :")
df.transpose()
| false | 0 | 2,401 | 0 | 2,427 | 2,401 |
||
129064925
|
<jupyter_start><jupyter_text>UFO Sightings
# Context
This dataset contains over 80,000 reports of UFO sightings over the last century.
# Content
There are two versions of this dataset: scrubbed and complete. The complete data includes entries where the location of the sighting was not found or blank (0.8146%) or have an erroneous or blank time (8.0237%). Since the reports date back to the 20th century, some older data might be obscured. Data contains city, state, time, description, and duration of each sighting.
# Inspiration
* What areas of the country are most likely to have UFO sightings?
* Are there any trends in UFO sightings over time? Do they tend to be clustered or seasonal?
* Do clusters of UFO sightings correlate with landmarks, such as airports or government research centers?
* What are the most common UFO descriptions?
# Acknowledgement
This dataset was scraped, geolocated, and time standardized from NUFORC data by Sigmond Axel [here](https://github.com/planetsig/ufo-reports).
Kaggle dataset identifier: ufo-sightings
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/ufo-sightings/scrubbed.csv")
df.head()
df.info()
# preprocessing
# The raw timestamps use "24:00" for midnight, which pandas cannot parse; remap to "23:59"
def tim(x):
    return x.replace("24:00", "23:59")
df.datetime = df.datetime.apply(tim)
df["datetime"] = pd.to_datetime(df["datetime"])
df.info()
df[["duration (seconds)", "duration (hours/min)"]]
df.head()
df.sort_values("datetime", ascending=False)
df["shape"].value_counts()
df["shape"].unique()
df["shape"].value_counts().head(30)
# eda
import seaborn as sn, matplotlib.pyplot as plt
plt.figure(figsize=(20, 8))
sn.countplot(
x=df["shape"],
)
plt.xticks(rotation=90)
plt.xlabel("Shape of UFO are seen")
plt.title("UFO Shape are shown")
plt.show()
df["year"] = df.datetime.dt.year
plt.figure(figsize=(20, 10))
sn.countplot(x=df.year)
plt.xticks(
rotation=90,
)
plt.title(
"Total UFO seen in Year",
)
plt.show()
df["duration (seconds)"] = df["duration (seconds)"].str.replace("[^0-9]", "")
df["duration (seconds)"] = df["duration (seconds)"].astype(int)
df[["year", "duration (seconds)"]].groupby("year").sum().plot(figsize=(20, 8))
plt.xticks(rotation=90)
plt.ylabel("Time (sec)")
plt.title("Time Duration show in Year")
plt.show()
df[["year", "duration (seconds)"]].groupby("year").sum().plot(
kind="bar", figsize=(20, 8)
)
plt.xticks(rotation=90)
plt.title(
"Time Duration show in Year",
fontdict={"size": 25, "style": "oblique", "font": "times new roman"},
)
plt.ylabel("Time (sec)", fontdict={"size": 20})
plt.xlabel("Year".upper(), fontdict={"size": 20})
plt.show()
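# (Sketch, not part of the original notebook) Two of the questions from the dataset description --
# which areas report the most sightings and whether sightings are seasonal -- assuming the
# scrubbed file has the "state" column described above.
print(df["state"].value_counts().head(10))
df["month"] = df.datetime.dt.month
df["month"].value_counts().sort_index().plot(
    kind="bar", figsize=(12, 4), title="Sightings per month"
)
plt.show()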
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/064/129064925.ipynb
|
ufo-sightings
| null |
[{"Id": 129064925, "ScriptId": 38363151, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5939826, "CreationDate": "05/10/2023 17:32:46", "VersionNumber": 1.0, "Title": "notebookd7462f8e03", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184791090, "KernelVersionId": 129064925, "SourceDatasetVersionId": 793053}]
|
[{"Id": 793053, "DatasetId": 388, "DatasourceVersionId": 814842, "CreatorUserId": 998023, "LicenseName": "Unknown", "CreationDate": "11/13/2019 19:45:57", "VersionNumber": 2.0, "Title": "UFO Sightings", "Slug": "ufo-sightings", "Subtitle": "Reports of unidentified flying object reports in the last century", "Description": "# Context\n\nThis dataset contains over 80,000 reports of UFO sightings over the last century. \n\n# Content\n\nThere are two versions of this dataset: scrubbed and complete. The complete data includes entries where the location of the sighting was not found or blank (0.8146%) or have an erroneous or blank time (8.0237%). Since the reports date back to the 20th century, some older data might be obscured. Data contains city, state, time, description, and duration of each sighting.\n\n# Inspiration\n\n* What areas of the country are most likely to have UFO sightings?\n* Are there any trends in UFO sightings over time? Do they tend to be clustered or seasonal?\n* Do clusters of UFO sightings correlate with landmarks, such as airports or government research centers?\n* What are the most common UFO descriptions? \n\n# Acknowledgement\n\nThis dataset was scraped, geolocated, and time standardized from NUFORC data by Sigmond Axel [here](https://github.com/planetsig/ufo-reports).", "VersionNotes": "Fix data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 388, "CreatorUserId": 270995, "OwnerUserId": NaN, "OwnerOrganizationId": 222.0, "CurrentDatasetVersionId": 793053.0, "CurrentDatasourceVersionId": 814842.0, "ForumId": 1968, "Type": 2, "CreationDate": "11/17/2016 03:50:44", "LastActivityDate": "02/06/2018", "TotalViews": 248610, "TotalDownloads": 35131, "TotalVotes": 620, "TotalKernels": 194}]
| null |
| false | 0 | 741 | 0 | 1,029 | 741 |
||
129064415
|
<jupyter_start><jupyter_text>lendingclub
Kaggle dataset identifier: lendingclub
<jupyter_script>#
# # LENDING CLUB CASE STUDY
# ### Problem Statement
#
# The company is the largest online loan marketplace, facilitating personal loans, business loans, and financing of medical procedures.
# Lending loans to ‘risky’ applicants is the largest source of financial loss (called credit loss). Credit loss is the amount of money lost by the lender when the borrower refuses to pay or runs away with the money owed. In other words, borrowers who default cause the largest amount of loss to the lenders. In this case, the customers labelled as 'charged-off' are the 'defaulters'.
#
# **Identification of such applicants using EDA is the aim of this case study.**
#
# The company wants to understand the driving factors behind loan default, i.e. the variables which are strong indicators of default. The company can utilise this knowledge for its portfolio and risk assessment.
#
#
#
# ### Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# ### Read the dataset and check the first five rows
loan_df = pd.read_csv("/kaggle/input/lendingclub/loan.csv")
loan_df.head()
# ### Check the shape of the dataframe
print(loan_df.shape)
loan_df.info(max_cols=111)
# ## Data Cleaning and Manipulation
# Data quality issues to handle:
# - Missing Values
#   - Drop the columns containing mostly null values, and also drop columns which do not add any value to the analysis.
# - Sanity Checks
#   - Check for data inconsistencies.
# - A small % of missing values can be imputed.
#   - Categorical columns can be imputed with the mode; for continuous columns the mean/median can be used where suitable (a generic sketch follows this list).
# - Modifying the Data Types:
#   - Converting some string columns to numeric variables in order to perform more analysis.
# - Outlier treatment is necessary for correct analysis.
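# (Sketch, not part of the original notebook) Generic imputation helpers for the mode/median
# strategies mentioned above; the column argument is a placeholder, not a specific column of
# this dataset.
def impute_mode(frame, col):
    # Fill missing categorical values with the most frequent category
    return frame[col].fillna(frame[col].mode()[0])


def impute_median(frame, col):
    # Fill missing numeric values with the median (robust to outliers)
    return frame[col].fillna(frame[col].median())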
# ## Data Understanding
# - The aim of analysing this loan dataset is to find out the factors that influence a loan ending in default.
# - The dataset has the details of past loan applicants and whether they resulted in a good loan or a bad loan.
# - Loan Status is the Target variable.
# - Column Loan_status has 3 values:
# >'Fully Paid'
# > 'Charged Off'
# > 'Current'
# Since we are not sure about 'Current' customers whether they will default or fully pay at the end of the tenure, we will filter out 'Current' customers and will only consider 'Fully Paid' and 'Charged Off' values. Here **charged off means defaulters**
# - Many columns which have NA values need to be removed
# **Missing Value Treatment**
# Calculate missing value %
df_null_percent = loan_df.isnull().sum() / len(loan_df) * 100
df_null_percent.sort_values(ascending=False)
# Filter columns with more than or equal to 50% missing values which will be our threshold to drop the columns
loan_df = loan_df.loc[:, df_null_percent < 50]
# print shape of the dataframe
loan_df.shape
loan_df.info(max_cols=54)
# **The customer behavior variables are not available at the time of
# loan application, and thus they cannot be used as predictors for
# credit approval**
# List the columns which are not required for the analysis. Total 21 columns
list_drop_col1 = [
"delinq_2yrs",
"earliest_cr_line",
"inq_last_6mths",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"out_prncp",
"out_prncp_inv",
"total_pymnt",
"total_pymnt_inv",
"total_rec_prncp",
"total_rec_int",
"total_rec_late_fee",
"recoveries",
"collection_recovery_fee",
"last_pymnt_d",
"last_pymnt_amnt",
"last_credit_pull_d",
"application_type",
]
# Drop the columns which are not required for the analysis. Total 21 columns
loan_df.drop(list_drop_col1, axis=1, inplace=True)
# Observe the shape after dropping the columns
loan_df.shape
# **Drop columns with single value as these will not be helpful in the analysis**
# Observe columns having single value
num_unique = (
loan_df.nunique()
) # This function is a method used to return the number of unique elements in a pandas object.
num_unique.sort_values()
loan_df.chargeoff_within_12_mths.unique()
# print columns with single value
list_col_unique_val = num_unique[num_unique.values == 1]
print(list_col_unique_val)
# List of Columns to be dropped
list_col_unique_val.index.tolist()
# Drop columns having single value
list_drop_col2 = list_col_unique_val.index.tolist()
loan_df.drop(list_drop_col2, axis=1, inplace=True)
loan_df.shape
# **Drop columns which do not add any value to the analysis.**
# - id
# - member_id
# - url
# - desc
# - zip_code
# drop columns which do not add any value to the analysis and observe the shape
list_drop_col3 = ["id", "member_id", "url", "desc", "zip_code"]
loan_df.drop(list_drop_col3, axis=1, inplace=True)
loan_df.shape
# **Keeping only loan_status values of "Charged Off" and "Fully Paid", since for "Current" loans we cannot yet know whether they will end up fully paid or charged off**
# observe the count of loan_status values
loan_df.loan_status.value_counts()
# consider subset with values "Fully Paid" and "Charged Off" and observe the shape
loan_df = loan_df[loan_df.loan_status.isin(["Fully Paid", "Charged Off"])]
loan_df.shape
#
# - Check for rows with more than 50% of missing values and drop them
# print rows with null values in descending order
loan_df.isnull().sum(axis=1).sort_values(ascending=False)
# calculate percentage of null values across rows
df_missing_val_row = loan_df.isnull().sum(axis=1) / loan_df.shape[1] * 100
print(df_missing_val_row.sort_values(ascending=False))
# Identify rows with missing values greater than or equal to 50%
df_missing_val_row[df_missing_val_row >= 50]
# #### Observation:
#
# There are no rows with >= 50% missing values, hence no rows need to be dropped
# **Sanity Checks**
#
# drop rows if "funded_amnt_inv" is greater than "loan_amnt"
loan_df[loan_df.funded_amnt_inv > loan_df.loan_amnt]
loan_df.info(max_cols=20)
# **Analysis:** There are no rows where "funded_amnt_inv" is greater than "loan_amnt".
# **Missing Value Imputation**
# Observe columns with null values
null_val = loan_df.isnull().sum() * 100 / loan_df.shape[0]
null_val[null_val.values > 0]
# Treating missing values for emp_title
loan_df["emp_title"].value_counts()
# calculate mode
loan_df["emp_title"].mode()
# **Analysis: We could replace missing values of "emp_title" with the mode, but that would not add much meaning, so we proceed without imputation and drop the column instead**
# Drop column emp_title
loan_df.drop(["emp_title"], axis=1, inplace=True)
loan_df.shape
# Treating missing values for emp_length
loan_df["emp_length"].value_counts()
# **Analysis: After observing the values of emp_length it seems that absence of value means the employee is not working for any firm but must be running a business. Hence we can impute the missing value by "Self Employed".**
#
# Replace null with "Self Employed"
loan_df["emp_length"] = loan_df["emp_length"].fillna("Self Employed")
loan_df["emp_length"].isnull().sum()
# Observe values for title column
loan_df.title.value_counts()
# Drop column title as it has a lot of distinct free-text values, so it won't be useful
loan_df.drop(["title"], axis=1, inplace=True)
loan_df.shape
# Treating missing values for pub_rec_bankruptcies
loan_df.pub_rec_bankruptcies.value_counts()
# Filter rows with nonnull values of pub_rec_bankruptcies
loan_df = loan_df[~loan_df.pub_rec_bankruptcies.isnull()]
loan_df.shape
# ### Handling Data Types
# Observe the data
loan_df.head()
# Observe data types
loan_df.dtypes
# Convert "funded_amnt_inv" to int64
print(loan_df.funded_amnt_inv.dtype)
loan_df.funded_amnt_inv = loan_df.funded_amnt_inv.astype("int64")
print(loan_df.funded_amnt_inv.dtype)
# Convert "annual_inc" to int64
print(loan_df.annual_inc.dtype)
loan_df.annual_inc = loan_df.annual_inc.astype("int64")
print(loan_df.annual_inc.dtype)
# Convert "pub_rec_bankruptcies" to int64
print(loan_df.pub_rec_bankruptcies.dtype)
loan_df.pub_rec_bankruptcies = loan_df.pub_rec_bankruptcies.astype("int64")
print(loan_df.pub_rec_bankruptcies.dtype)
# Convert "issue_d" to Datetime
print(loan_df.issue_d)
loan_df.issue_d = pd.to_datetime(loan_df.issue_d, format="%b-%y")
print(loan_df.issue_d)
print(loan_df.issue_d.dtype)
# Convert "term" to "int64" after removing " months" string
print(loan_df.term)
loan_df.term = loan_df.term.str.replace(" months", "").astype("int64")
print(loan_df.term)
print(loan_df.term.dtype)
# Convert "int_rate" to "float64" after removing "%" symbol.
print(loan_df.int_rate)
loan_df.int_rate = loan_df.int_rate.str.replace("%", "").astype("float64")
print(loan_df.int_rate)
# Observe values of emp_length and Remove "+".
loan_df.emp_length.value_counts()
# remove '+' (regex=False so the '+' is treated literally rather than as a regex quantifier)
loan_df.emp_length = loan_df.emp_length.str.replace("+", "", regex=False)
loan_df.emp_length.value_counts()
# # Treating Outliers
# Observe basic statistical details about the dataset
loan_df.annual_inc.describe()
# **Remove outliers based on annual_inc**
# Plot boxplot to observe the outliers
loan_df.annual_inc.plot.box(fontsize=12, figsize=(8, 8)).set(title="Annual income")
plt.show()
# Observe Quantiles
quantiles = loan_df.annual_inc.quantile([0.01, 0.25, 0.5, 0.75, 0.99])
print(quantiles)
# Remove outliers
df_final = loan_df[(loan_df.annual_inc > 14400) & (loan_df.annual_inc < 234144)]
# Plot boxplot from new dataset after removing obvious outliers
df_final.annual_inc.plot.box().set(title="Annual income")
plt.show()
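# (Sketch, not part of the original notebook) The hard-coded cut-offs above appear to be the
# 1%/99% quantile values printed earlier; the same filter can be derived without magic numbers:
low, high = loan_df.annual_inc.quantile([0.01, 0.99])
df_alt = loan_df[(loan_df.annual_inc > low) & (loan_df.annual_inc < high)]
print(low, high, df_alt.shape)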
# Final dataset after data cleaning and data handling
df_final.head()
# ## Data Analysis
# **Derived metrics for Month and Year from column "issue_d"**
# Derived metrics analysis on column "issue_d"
df_final["year"] = df_final.issue_d.dt.year
df_final["month"] = df_final.issue_d.dt.month
df_final[["issue_d", "month", "year"]].head()
# Create new column loan_status_code based on the loan_status column, where 1 = "Charged Off" and 0 = "Fully Paid"
loan_status_map = {"Charged Off": 1, "Fully Paid": 0}
df_final["loan_status_code"] = df_final["loan_status"].map(loan_status_map)
print(df_final["loan_status_code"])
df_final.loan_status.value_counts()
# Creating different groups for interest rate
# int_rate is between 5% and 25%; group it accordingly
df_final.int_rate.describe()
# Treating for int_rate
bins = [5, 9, 13, 17, 21, 25]
labels = ["5%-9%", "9%-13%", "13%-17%", "17%-21%", "21%-25%"]
df_final["int_rate_group"] = pd.cut(df_final["int_rate"], bins=bins, labels=labels)
df_final["int_rate_group"].value_counts()
# Treating for annual_income
bins = [14400, 25000, 50000, 100000, 150000, 234000]
labels = ["14k-25k", "25k-50k", "50k-100k", "100k-150k", "150k-250k"]
df_final["annual_inc_group"] = pd.cut(df_final["annual_inc"], bins=bins, labels=labels)
df_final["annual_inc_group"].value_counts()
# # Univariate Analysis
# (P.S. - The plot function reference is taken from a kaggle notebook on EDA)
# define a function to attach values with each bar
def label(ax, x):
"""
Attach a text label above each bar displaying its height
"""
for p in ax.patches:
ax.annotate("{:1}".format(p.get_height()), (p.get_x() + x, p.get_height() + 10))
# define function to plot countplot for categorical variables
def cat(df, col_name):
fig, ax = plt.subplots(figsize=(10, 6), dpi=100)
sns.countplot(
x=col_name,
data=df_final[df_final.loan_status == "Charged Off"],
order=df[col_name].value_counts().index,
)
ax.set_xlabel(col_name)
ax.set_ylabel("No of loans")
ax.set_title("Plot of " + col_name, weight="bold")
plt.xticks(rotation=90)
label(ax, 0.01)
plt.show()
# define function to plot countplot for numerical variables
def num(df, col_name):
fig, ax = plt.subplots(figsize=(10, 6), dpi=100)
plt.figure(figsize=(16, 6))
plt.subplot(1, 2, 1)
sns.distplot(a=df[col_name], rug=True, color="#388E3C")
plt.subplot(1, 2, 2)
sns.boxplot(data=df[col_name], color="#388E3C")
plt.suptitle("Distribution of " + col_name)
label(ax, 0.01)
plt.show()
# **grade, sub_grade, term, emp_length, issue_y, issue_m**
# countplot for "grade", "sub_grade","term","emp_length","issue_y","issue_m" for Charged off
for factor in ["grade", "sub_grade", "term", "emp_length", "year", "month"]:
cat(df_final, factor)
# **Observation**
# - Count plot of **Grade** shows that grades B, C and D are charged off more often than the other grades
# - Count plot of **Sub Grade** shows that sub grades B3, B5, B4, C1 and C2 are charged off more than the others
# - Count plot of **term** shows that 36-month loans are issued more often and hence also default more often than 60-month loans
# - Count plot of **emp_length** shows that employees with 10+ years of experience have defaulted more than those with less experience
# - Count plot of issue **year** shows that the number of defaulted loans increased up to 2011; the trend rises year on year
# - Count plot of issue **month** shows an increasing number of defaults towards the end of the year, with more defaults in October, November and December
# **loan_status**
sns.countplot(x="loan_status", data=df_final)
# **Observation**
# - This shows that 14% of total loans are charged off.
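# (Sketch, not part of the original notebook) The ~14% charged-off share can be computed directly:
print(df_final.loan_status.value_counts(normalize=True).mul(100).round(2))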
# ### addr_state, purpose, home_ownership
# Plot countplot for "addr_state", "purpose","home_ownership"
for factor in ["addr_state", "purpose", "home_ownership"]:
cat(df_final, factor)
# ### Observation
# - CA, NY, FL and TX are the states with the maximum number of loan defaults
# - Maximum loan defaults are for debt consolidation, paying off credit cards, small business and 'other' purposes
# - Education and renewable energy are the categories with the fewest defaulted loans
# - People who rent or have a mortgage are more likely to default
# **funded_amnt, installment, amnt_to_inc_ratio**
# Plot distplot for "funded_amnt","installment","amnt_to_inc_ratio"
sns.distplot(a=df_final["funded_amnt"], rug=True, color="#388E3C")
plt.suptitle("Distribution of funded_amnt")
sns.distplot(a=df_final["installment"], rug=True, color="#388E3C")
plt.suptitle("Distribution of installment")
# ### Observation
# - Funded amount is ranging from 5000 to 15000 USD
# - Installment amount is ranging from 200 to 400 USD
# **Segmented Univariate Analysis**
# - int_rate_group
# Countplot of int_rate_group
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.countplot(x="int_rate_group", data=df_final)
ax.set_xlabel("int_rate_group")
ax.set_ylabel("No of loans")
ax.set_title("int_rate_group", weight="bold")
label(ax, 0.20)
plt.show()
# **Observation**
# - The 9%-13% interest rate range is where the maximum number of loans has been issued
# - The 21%-25% range is where the fewest loans have been issued
#
# Observing the same above graph for charged off
# Countplot of int_rate_group for charged off
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.countplot(x="int_rate_group", data=df_final[df_final.loan_status == "Charged Off"])
ax.set_xlabel("int_rate_group")
ax.set_ylabel("No of loans")
ax.set_title("int_rate_group", weight="bold")
label(ax, 0.20)
plt.show()
# **Observation**
# - The 13%-17% interest rate range has the maximum number of loan defaults
# - The 21%-25% range shows the fewest loan defaults in the plot above
# **But considering the above 2 plots we come to the conclusion that:**
# > Looking at the % of defaults within each group, the 21%-25% interest range has the highest chance of default (as high as 44%)
# P.S. (Percent here means the number of charged-off loans divided by the total number of loans issued in that interest-rate group; see the sketch just below)
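# (Sketch, not part of the original notebook) Default rate per interest-rate group, i.e. the
# "% of defaults in each group" referred to above; loan_status_code is 1 for Charged Off,
# so the group mean is the default rate.
default_rate_by_int = (
    df_final.groupby("int_rate_group")["loan_status_code"].mean().mul(100).round(2)
)
print(default_rate_by_int)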
# ## Summary of univariate analysis
# - Count plot of **Grade** shows that grades B, C and D are charged off more often than the other grades
# - Count plot of **Sub Grade** shows that sub grades B3, B5, B4, C1 and C2 are charged off more than the others
# - Count plot of **term** shows that 36-month loans are issued more often and hence also default more often than 60-month loans
# - Count plot of **emp_length** shows that employees with 10+ years of experience have defaulted more than those with less experience
# - Count plot of issue **year** shows that the number of defaulted loans increased up to 2011; the trend rises year on year
# - Count plot of issue **month** shows an increasing number of defaults towards the end of the year, with more defaults in October, November and December
# - CA, NY, FL and TX are the states with the maximum number of loan defaults
# - Maximum loan defaults are for debt consolidation, paying off credit cards, small business and 'other' purposes
# - Education and renewable energy are the categories with the fewest defaulted loans
# - People who rent or have a mortgage are more likely to default
# - Funded amount mostly ranges from 5000 to 15000 USD
# - Installment amount mostly ranges from 200 to 400 USD
# - The 9%-13% interest rate range is where the maximum number of loans has been issued, while the 21%-25% range has the fewest
# - The 13%-17% range has the maximum number of loan defaults, and the 21%-25% range the fewest, in absolute terms
# - Considering the two interest-rate plots together, the 21%-25% range has the highest chance of default (as high as 44%), where the percentage is the number of charged-off loans divided by the total number of loans issued in that interest-rate group
# # Bivariate Analysis
# **Grade vs Loan Status**
# Countplot of Grade vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="grade", hue="loan_status", data=df_final, palette="Set2")
ax.set_xlabel("Grade")
ax.set_ylabel("No of loans")
label(ax, 0.1)
ax.set_title("Grade vs Loan Status")
plt.show()
# **Observation**
# - The counts of Grade B, C and D are highest in Charged Off
# **Sub Grade vs Loan Status**
# Countplot of Sub Grade vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="sub_grade", hue="loan_status", data=df_final)
ax.set_xlabel("Sub Grade")
ax.set_ylabel("No of loans")
ax.set_title("Sub Grade vs Loan Status")
plt.show()
# **Observation**
# - The counts of B3,B4,B5, C1,C2, D3 sub grades are higher in Charged Off
# **Term vs Loan Status**
# Countplot of Term vs Loan Status
fig, ax = plt.subplots(figsize=(5, 4), dpi=100)
sns.countplot(x="term", hue="loan_status", data=df_final)
ax.set_xlabel("Term")
ax.set_ylabel("No of loans")
label(ax, 0.30)
ax.set_title("Term vs Loan Status(Charged off)")
# **Observation**
# - Although 36-month loans have more defaults in absolute numbers than 60-month loans,
# - the charged-off share within 36-month loans is only about 10% of all loans of that tenure,
# - while for 60-month loans it is about 25% of all loans issued with that tenure, which is much higher; so a 60-month loan is more likely to end up in default (see the sketch below)
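# (Sketch, not part of the original notebook) Charged-off percentage by loan term, backing the
# 10% (36 months) vs 25% (60 months) figures quoted above.
term_default_rate = (
    pd.crosstab(df_final.term, df_final.loan_status, normalize="index").mul(100).round(2)
)
print(term_default_rate)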
# **Employment Length vs Loan Status**
# Countplot of Employment Length vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="emp_length", hue="loan_status", data=df_final, palette="Set2")
ax.set_xlabel("Employment length")
ax.set_ylabel("No of loans")
ax.set_title("Employment length vs Loan Status")
label(ax, 0.001)
# **Observation**
# - The maximum number of loans is issued to people with 10+ years of emp_length, and hence the number of defaulters in that group is also high
# **Loan Issue Year vs Loan Status**
# Countplot of Loan Issue Year vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(x="year", data=df_final, hue="loan_status", palette="Set2")
ax.set_xlabel("Loan Issue Year")
ax.set_ylabel("No of loans")
label(ax, 0.01)
ax.set_title("Loan Issue Year (fully paid and charged off)")
plt.show()
# ### Observation
# - The plot of loan issue year shows that the maximum number of loans was taken in 2011
# - A high number of the loans issued in 2011 were also charged off
# ### Loan Issue Month vs Loan Status
# Countplot of Loan Issue Month vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(x="month", data=df_final, hue="loan_status", palette="Set2")
ax.set_xlabel("Loan Issue Month")
ax.set_ylabel("No of loans")
label(ax, 0.01)
ax.set_title("Loan Issue Month (fully paid and charged off)")
plt.show()
# **Observation**
# - A high number of loans issued in the Sep-Dec months were charged off
# **Purpose vs Loan Status**
# Countplot of Purpose vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(
x="purpose",
hue="loan_status",
data=df_final,
palette="Set2",
order=df_final[df_final.loan_status == "Charged Off"].purpose.value_counts().index,
)
ax.set_xlabel("Loan purpose")
ax.set_ylabel("No of loans")
ax.set_title("Loan purpose vs Loan Status")
label(ax, 0.001)
plt.show()
# **Observation**
# - Loans taken for debt consolidation, 'other', credit card and home improvement purposes go unpaid far more often than those for education / renewable energy
# - Debt consolidation is also the category in which the maximum number of loans is given.
# **Home Ownership vs Loan Status**
# Hist of Home Ownership vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
sns.histplot(
data=df_final,
x="home_ownership",
hue="loan_status",
multiple="dodge",
shrink=0.8,
palette="Set2",
)
ax.set_xlabel("Home ownership")
ax.set_ylabel("No of loans")
ax.set_title("Home ownership vs Loan Status")
label(ax, 0.01)
# **Observation**
# - People in the Rent and Mortgage categories take the maximum number of loans and have a higher chance of defaulting than people who own their house
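# (Sketch, not part of the original notebook) Loan counts and charged-off rate per
# home-ownership category, to check the claim above on a rate basis rather than raw counts.
home_stats = df_final.groupby("home_ownership")["loan_status_code"].agg(
    loans="count", charged_off="sum", default_rate="mean"
)
home_stats["default_rate"] = (home_stats["default_rate"] * 100).round(2)
print(home_stats)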
# **Verification vs Loan Status**
# Histplot of Verification vs Loan Status
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.histplot(
data=df_final,
x="verification_status",
hue="loan_status",
multiple="dodge",
shrink=0.8,
palette="Set2",
)
ax.set_xlabel("Verification status")
ax.set_ylabel("No of loans")
ax.set_title("Verfification status vs Loan Status")
label(ax, 0.01)
plt.show()
# **Observation**
# - More of the charged-off loans are Verified than Not Verified
# **State vs Loan Status**
# Countplot of State vs Loan Status
fig, ax = plt.subplots(figsize=(16, 8), dpi=100)
sns.countplot(x="addr_state", hue="loan_status", data=df_final)
ax.set_xlabel("State")
ax.set_ylabel("No of loans")
ax.set_title("State vs Loan Status = Charged Off")
plt.show()
# **Observation**
# - Borrowers from the states CA, FL, NY and NJ account for the most charged-off loans
# **Grade vs Loan Amount (Charged Off)**
# Boxplot of Grade vs Loan Amount for charged-off loans
plt.figure(figsize=(12, 8))
ax = sns.boxplot(
y="grade",
x="loan_amnt",
data=df_final[df_final.loan_status == "Charged Off"],
palette="rainbow",
)
ax.set_title("Grade vs Loan Amount", fontsize=15, color="b")
ax.set_ylabel("Grade", fontsize=14, color="b")
ax.set_xlabel("Loan Amount", fontsize=14, color="b")
plt.show()
# **Observation**
# - Grades F, G and E are the three categories with the higher charged-off loan amounts
# - The median loan amount for F and G is around 20k, with Q3 at 25k
# - Grade A has the lowest median, at about 7.5k
# **DTI vs Loan status**
plt.figure(figsize=(12, 8))
ax = sns.boxplot(y="dti", x="loan_status", data=df_final, palette="rainbow")
ax.set_title("DTI for both Fully Paid and Charged off loans", fontsize=15, color="b")
ax.set_ylabel("DTI spread", fontsize=14, color="b")
ax.set_xlabel("Loan status", fontsize=14, color="b")
plt.show()
# **Observation**
# - DTI is not a significant factor because the medians for Fully Paid and Charged Off are close to each other
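# (Sketch, not part of the original notebook) Summary statistics of DTI by loan status, to back
# the observation that the two distributions are similar.
print(df_final.groupby("loan_status")["dti"].describe().round(2))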
# **amnt_to_inc_ratio vs Loan status**
#
df_final["amnt_to_inc_ratio"] = df_final.loan_amnt / df_final.annual_inc
df_final[["loan_amnt", "annual_inc", "amnt_to_inc_ratio"]]
plt.figure(figsize=(12, 8))
ax = sns.boxplot(
y="amnt_to_inc_ratio", x="loan_status", data=df_final, palette="rainbow"
)
ax.set_title(
"amnt_to_inc_ratio for both Fully Paid and Charged off loans",
fontsize=15,
color="b",
)
ax.set_ylabel("amnt_to_inc_ratio spread", fontsize=14, color="b")
ax.set_xlabel("Loan status", fontsize=14, color="b")
plt.show()
# **Observation**
# - amnt_to_inc_ratio is an indicator of bad loans, as its median and Q3 are higher for charged-off loans
# **Summary**
# >- The above plots are for charged-off loans. An applicant is more likely to default under the conditions below:
#
# >- Although 36-month loans have more defaults in absolute numbers than 60-month loans,
# >- the charged-off share within 36-month loans is only about 10% of all loans of that tenure,
# >- while for 60-month loans it is about 25% of all loans issued with that tenure, which is much higher; so a 60-month loan is more likely to end up in default
# >- Loans taken for debt consolidation, 'other', credit card and home improvement purposes go unpaid far more often than those for education / renewable energy
# >- Debt consolidation is also the category in which the maximum number of loans is given.
# >- People in the Rent and Mortgage categories take the maximum number of loans and have a higher chance of defaulting than people who own their house
# >- More of the charged-off loans are Verified than Not Verified
# >- Borrowers from the states CA, FL, NY and NJ account for the most charged-off loans
# >- Grades F, G and E are the three categories with the higher charged-off loan amounts
# >- The median loan amount for F and G is around 20k, with Q3 at 25k
# >- Grade A has the lowest median, at about 7.5k
# >- DTI is not a significant factor because the medians for Fully Paid and Charged Off are close to each other
# >- amnt_to_inc_ratio is an indicator of bad loans, as its median and Q3 are higher for charged-off loans
# To identify the correlation between all numeric variables of the dataset and see which variables are negatively impacting the loan_status variable
f, ax = plt.subplots(figsize=(11, 9))
corr = df_final.corr(numeric_only=True)  # numeric_only avoids errors from the remaining object columns on newer pandas
sns.heatmap(corr, vmax=0.3, annot=True)
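# (Sketch, not part of the original notebook) Correlation of each numeric variable with
# loan_status_code (1 = Charged Off), sorted, to read the heatmap's key row directly.
corr_with_default = corr["loan_status_code"].drop("loan_status_code").sort_values()
print(corr_with_default.round(3))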
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/064/129064415.ipynb
|
lendingclub
|
shrinivasbhat
|
[{"Id": 129064415, "ScriptId": 38366452, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13160802, "CreationDate": "05/10/2023 17:27:46", "VersionNumber": 1.0, "Title": "Lending_Club_Case_Study", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 796.0, "LinesInsertedFromPrevious": 796.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 184790199, "KernelVersionId": 129064415, "SourceDatasetVersionId": 4939533}]
|
[{"Id": 4939533, "DatasetId": 2864339, "DatasourceVersionId": 5007603, "CreatorUserId": 11454177, "LicenseName": "Unknown", "CreationDate": "02/03/2023 03:50:46", "VersionNumber": 1.0, "Title": "lendingclub", "Slug": "lendingclub", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2864339, "CreatorUserId": 11454177, "OwnerUserId": 11454177.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4939533.0, "CurrentDatasourceVersionId": 5007603.0, "ForumId": 2900483, "Type": 2, "CreationDate": "02/03/2023 03:50:46", "LastActivityDate": "02/03/2023", "TotalViews": 123, "TotalDownloads": 4, "TotalVotes": 1, "TotalKernels": 5}]
|
[{"Id": 11454177, "UserName": "shrinivasbhat", "DisplayName": "Shrinivas Bhat", "RegisterDate": "09/01/2022", "PerformanceTier": 0}]
|
#
# # LENDING CLUB CASE STUDY
# ### Problem Statement
#
# The company is the largest online loan marketplace, facilitating personal loans, business loans, and financing of medical procedures.
# Lending loans to ‘risky’ applicants is the largest source of financial loss (called credit loss). Credit loss is the amount of money lost by the lender when the borrower refuses to pay or runs away with the money owed. In other words, borrowers who default cause the largest amount of loss to the lenders. In this case, the customers labelled as 'charged-off' are the 'defaulters'.
#
# **Identification of such applicants using EDA is the aim of this case study.**
#
# The company wants to understand the driving factors behind loan default, i.e. the variables which are strong indicators of default. The company can utilise this knowledge for its portfolio and risk assessment.
#
#
#
# ### Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# ### Read the dataset and check the first five rows
loan_df = pd.read_csv("/kaggle/input/lendingclub/loan.csv")
loan_df.head()
# ### Check the shape of the dataframe
print(loan_df.shape)
loan_df.info(max_cols=111)
# ## Data Cleaning and Manipulation
# Data Quality Issues can:
# - Missing Values
# - We can Drop the columns containing maximum null values and also Drop columns which does not add any value to analysis.
# - Sanity Checks
# - We can check for data inconsistencies
# - Small % of Missing Value can be Imputed.
# - If categorical can be imputed by mode or if continuous if suitable we can go for Mean/median
# - Modifing the Data Types:
# - Converting the some string columns to numeric variables in order to perform more analysis.
# - Outliers Treatment is necessary for correct analysis
# ## Data Understanding
# - Aim of the Analysis of this loan dataset is find out the factors influencing a loan to be default.
# - The dataset has the details of the past loan applicants and whethe r they resulted in good loan or bad loan.
# - Loan Status is the Target variable.
# - Column Loan_status has 3 values:
# >'Fully Paid'
# > 'Charged Off'
# > 'Current'
# Since we are not sure about 'Current' customers whether they will default or fully pay at the end of the tenure, we will filter out 'Current' customers and will only consider 'Fully Paid' and 'Charged Off' values. Here **charged off means defaulters**
# - Many columns which have NA values need to be removed
# **Missing Value Treatment**
# Calculate missing value %
df_null_percent = loan_df.isnull().sum() / len(loan_df) * 100
df_null_percent.sort_values(ascending=False)
# Filter columns with more than or equal to 50% missing values which will be our threshold to drop the columns
loan_df = loan_df.loc[:, df_null_percent < 50]
# print shape of the dataframe
loan_df.shape
loan_df.info(max_cols=54)
# **The customer behavior variables are not available at the time of
# loan application, and thus they cannot be used as predictors for
# credit approval**
# List the coumns which are not required for the analysis. Total 21 columns
list_drop_col1 = [
"delinq_2yrs",
"earliest_cr_line",
"inq_last_6mths",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"out_prncp",
"out_prncp_inv",
"total_pymnt",
"total_pymnt_inv",
"total_rec_prncp",
"total_rec_int",
"total_rec_late_fee",
"recoveries",
"collection_recovery_fee",
"last_pymnt_d",
"last_pymnt_amnt",
"last_credit_pull_d",
"application_type",
]
# Drop the coumns which are not required for the analysis. Total 21 columns
loan_df.drop(list_drop_col1, axis=1, inplace=True)
# Observer the shape after dropping the columns
loan_df.shape
# **Drop columns with single value as these will not be helpful in the analysis**
# Observe columns having single value
num_unique = (
loan_df.nunique()
) # This function is a method used to return the number of unique elements in a pandas object.
num_unique.sort_values()
loan_df.chargeoff_within_12_mths.unique()
# print columns with single value
list_col_unique_val = num_unique[num_unique.values == 1]
print(list_col_unique_val)
# List of Columns to be dropped
list_col_unique_val.index.tolist()
# Drop columns having single value
list_drop_col2 = list_col_unique_val.index.tolist()
loan_df.drop(list_drop_col2, axis=1, inplace=True)
loan_df.shape
# **Drop columns which does not add any value to analysis.**
# - id
# -member_id
# -url
# -desc
# -zip_code
# drop columns which does not add any value to analysis and observe the shape
list_drop_col3 = ["id", "member_id", "url", "desc", "zip_code"]
loan_df.drop(list_drop_col3, axis=1, inplace=True)
loan_df.shape
# **Taking loan_status equal to "Charged Off" and "Fully Paid" as if the loan_status is "Current" we might not know whether they will turn in fully paid or will they be charged off**
# observe the count of loan_status values
loan_df.loan_status.value_counts()
# consider subset with values "Fully Paid" and "Charged Off" and observe the shape
loan_df = loan_df[loan_df.loan_status.isin(["Fully Paid", "Charged Off"])]
loan_df.shape
#
# - Check for rows with more than 50% of missing values and drop them
# print rows with null values in descending order
loan_df.isnull().sum(axis=1).sort_values(ascending=False)
# calculate percentage of null values across rows
df_missing_val_row = loan_df.isnull().sum(axis=1) / loan_df.shape[1] * 100
print(df_missing_val_row.sort_values(ascending=False))
# Identify rows with missing values greater than or equal to 50%
df_missing_val_row[df_missing_val_row >= 50]
# #### Observation:
#
# There are no rows with missing values >=50% Hence no need to drop any rows
# **Sanity Checks**
#
# drop rows if "funded_amnt_inv" is greater than "loan_amnt"
loan_df[loan_df.funded_amnt_inv > loan_df.loan_amnt]
loan_df.info(max_cols=20)
# **Analysis:** There are no rows with "funded_amnt_inv" is greater than "loan_amnt".
# **Missing Value Imputation**
# Observe columns with null values
null_val = loan_df.isnull().sum() * 100 / loan_df.shape[0]
null_val[null_val.values > 0]
# Treating missing values for emp_title
loan_df["emp_title"].value_counts()
# calculate mode
loan_df["emp_title"].mode()
# **Analysis: We Can replace missing values of "emp_title" with mode, but it won't create much sense so will go ahead without the imputation**
# Drop column emp_title
loan_df.drop(["emp_title"], axis=1, inplace=True)
loan_df.shape
# Treating missing values for emp_length
loan_df["emp_length"].value_counts()
# **Analysis: After observing the values of emp_length it seems that absence of value means the employee is not working for any firm but must be running a business. Hence we can impute the missing value by "Self Employed".**
#
# Replace null with "Self Employed"
loan_df["emp_length"] = loan_df["emp_length"].fillna("Self Employed")
loan_df["emp_length"].isnull().sum()
# Observe values for title column
loan_df.title.value_counts()
# Drop column title as it has lot of different values so wont be useful
loan_df.drop(["title"], axis=1, inplace=True)
loan_df.shape
# Treating missing values for pub_rec_bankruptcies
loan_df.pub_rec_bankruptcies.value_counts()
# Filter rows with nonnull values of pub_rec_bankruptcies
loan_df = loan_df[~loan_df.pub_rec_bankruptcies.isnull()]
loan_df.shape
# ### Handling Data Types
# Observe the data
loan_df.head()
# Observe data types
loan_df.dtypes
# Convert "funded_amnt_inv" to int64
print(loan_df.funded_amnt_inv.dtype)
loan_df.funded_amnt_inv = loan_df.funded_amnt_inv.astype("int64")
print(loan_df.funded_amnt_inv.dtype)
# Convert "annual_inc" to int64
print(loan_df.annual_inc.dtype)
loan_df.annual_inc = loan_df.annual_inc.astype("int64")
print(loan_df.annual_inc.dtype)
# Convert "pub_rec_bankruptcies" to int64
print(loan_df.pub_rec_bankruptcies.dtype)
loan_df.pub_rec_bankruptcies = loan_df.pub_rec_bankruptcies.astype("int64")
print(loan_df.pub_rec_bankruptcies.dtype)
# Convert "issue_d" to Datetime
print(loan_df.issue_d)
loan_df.issue_d = pd.to_datetime(loan_df.issue_d, format="%b-%y")
print(loan_df.issue_d)
print(loan_df.issue_d.dtype)
# Convert "term" to "int64" after removing " months" string
print(loan_df.term)
loan_df.term = loan_df.term.str.replace(" months", "").astype("int64")
print(loan_df.term)
print(loan_df.term.dtype)
# Convert "int_rate" to "float64" after removing "%" symbol.
print(loan_df.int_rate)
loan_df.int_rate = loan_df.int_rate.str.replace("%", "").astype("float64")
print(loan_df.int_rate)
# Observe values of emp_length and Remove "+".
loan_df.emp_length.value_counts()
# remove '+'
loan_df.emp_length = loan_df.emp_length.str.replace("+", "")
loan_df.emp_length.value_counts()
# # Treating Outliers
# Observe basic statistical details about the dataset
loan_df.annual_inc.describe()
# **Remove outliers based on annual_inc**
# Plot boxplot to observe the outliers
loan_df.annual_inc.plot.box(fontsize=12, figsize=(8, 8)).set(title="Annual income")
plt.show()
# Observe Quantiles
quantiles = loan_df.annual_inc.quantile([0.01, 0.25, 0.5, 0.75, 0.99])
print(quantiles)
# Remove outliers
df_final = loan_df[(loan_df.annual_inc > 14400) & (loan_df.annual_inc < 234144)]
# Plot boxplot from new dataset after removing obvious outliers
df_final.annual_inc.plot.box().set(title="Annual income")
plt.show()
# Final dataset after data cleaning and data handling
df_final.head()
# ## Data Analysis
# **Derived metrics for Month and Year from column "issue_d"**
# Derived metrics analysis on column "issue_d"
df_final["year"] = df_final.issue_d.dt.year
df_final["month"] = df_final.issue_d.dt.month
df_final[["issue_d", "month", "year"]].head()
# Create new column loan_status_code with 0 and 1 values based on loan_status column where 0="Charged Off" and 1="Fully Paid"
loan_status_map = {"Charged Off": 1, "Fully Paid": 0}
df_final["loan_status_code"] = df_final["loan_status"].map(loan_status_map)
print(df_final["loan_status_code"])
df_final.loan_status.value_counts()
# Creating different groups for interest rate
# Int_rate is between 5% to 25%, grouping them accordingly
df_final.int_rate.describe
# Treating for int_rate
bins = [5, 9, 13, 17, 21, 25]
labels = ["5%-9%", "9%-13%", "13%-17%", "17%-21%", "21%-25%"]
df_final["int_rate_group"] = pd.cut(df_final["int_rate"], bins=bins, labels=labels)
df_final["int_rate_group"].value_counts()
# Treating for annual_income
bins = [14400, 25000, 50000, 100000, 150000, 234000]
labels = ["14k-25k", "25k-50k", "50k-100k", "100k-150k", "150k-250k"]
df_final["annual_inc_group"] = pd.cut(df_final["annual_inc"], bins=bins, labels=labels)
df_final["annual_inc_group"].value_counts()
# # Univariate Analysis
# (P.S. - The plot function reference is taken from a kaggle notebook on EDA)
# define a function to attach values with each bar
def label(ax, x):
"""
Attach a text label above each bar displaying its height
"""
for p in ax.patches:
ax.annotate("{:1}".format(p.get_height()), (p.get_x() + x, p.get_height() + 10))
# define function to plot countplot for categorical variables
def cat(df, col_name):
fig, ax = plt.subplots(figsize=(10, 6), dpi=100)
sns.countplot(
x=col_name,
data=df_final[df_final.loan_status == "Charged Off"],
order=df[col_name].value_counts().index,
)
ax.set_xlabel(col_name)
ax.set_ylabel("No of loans")
ax.set_title("Plot of " + col_name, weight="bold")
plt.xticks(rotation=90)
label(ax, 0.01)
plt.show()
# define function to plot countplot for numerical variables
def num(df, col_name):
fig, ax = plt.subplots(figsize=(10, 6), dpi=100)
plt.figure(figsize=(16, 6))
plt.subplot(1, 2, 1)
sns.distplot(a=df[col_name], rug=True, color="#388E3C")
plt.subplot(1, 2, 2)
sns.boxplot(data=df[col_name], color="#388E3C")
plt.suptitle("Distribution of " + col_name)
label(ax, 0.01)
plt.show()
# **grade, sub_grade, term, emp_length, issue_y, issue_m**
# countplot for "grade", "sub_grade","term","emp_length","issue_y","issue_m" for Charged off
for factor in ["grade", "sub_grade", "term", "emp_length", "year", "month"]:
cat(df_final, factor)
# **Observation**
# - Count Plot of **Grade** shows Grade B ,C, D are given more likely to charged off as compared to other grades
# - Count Plot of **Sub Grade** shows Grade B3, B5, B4, C1 , C2 have charged off more as compared to other grades
# - Count Plot of **term** shows 36 months loans are issued more so defaulted more compared to 60 months loan
# - Count Plot of **emp_length** shows employees with 10 years have more defaulted on loan compared with lesser experience
# - Count Plot of issue **year** shows the no. of defaulted loans have increased in the year 2011. The trend is increasing with the increase in the year
# - Count Plot of issue **month** shows there is increasing trend in number of defaults with increase in the months. More defaults can be seen in the month of October, November, December.
# **loan_status**
sns.countplot(x="loan_status", data=df_final)
# **Observation**
# - This shows that 14% of total loans are charged off.
# ### addr_state, purpose, home_ownership
# Plot countplot for "addr_state", "purpose","home_ownership"
for factor in ["addr_state", "purpose", "home_ownership"]:
cat(df_final, factor)
# ### Observation
# - States CA, NY, FL and TX are the states for which maximum loan defaults
# - Maximum loan defaults are for debt consolidation, paying off Credit card, small business and 'other' reasons
# - Education and renewable energy is the least category where loans are defaulted
# - People who are in Rented house or Mortgate are more likely to Default
# **funded_amnt, installment, amnt_to_inc_ratio**
# Plot distplot for "funded_amnt","installment","amnt_to_inc_ratio"
sns.distplot(a=df_final["funded_amnt"], rug=True, color="#388E3C")
plt.suptitle("Distribution of funded_amnt")
sns.distplot(a=df_final["installment"], rug=True, color="#388E3C")
plt.suptitle("Distribution of installment")
# ### Observation
# - Funded amount is ranging from 5000 to 15000 USD
# - Installment amount is ranging from 200 to 400 USD
# **Segmented Univariate Analysis**
# - int_rate_group
# Countplot of int_rate_group
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.countplot(x="int_rate_group", data=df_final)
ax.set_xlabel("int_rate_group")
ax.set_ylabel("No of loans")
ax.set_title("int_rate_group", weight="bold")
label(ax, 0.20)
plt.show()
# **Observation**
# - Interest rate range 9 to 13 is the range where maximum loans have been issued
# - 21 - 25% is the range where minimum loans have been issued
#
# Observing the same above graph for charged off
# Countplot of int_rate_group for charged off
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.countplot(x="int_rate_group", data=df_final[df_final.loan_status == "Charged Off"])
ax.set_xlabel("int_rate_group")
ax.set_ylabel("No of loans")
ax.set_title("int_rate_group", weight="bold")
label(ax, 0.20)
plt.show()
# **Observation**
# . Interest rate range 13% - 17% is the range with maximum loan defaults
# - 21 - 25% is the range where minimum loan defaults can be observed from the above plot
# **But considering above 2 plots we come to a conclusion that**
# > If we notice the % of defaults in each groups we can state that the interest range 21%-25% has maximum chances of defaults (as high as 44%)
# P.S. (Percent here means no. of charged off divided by the total no. of loan issues in a particular interest rate group)
# ## Summary of univariate analaysis
# - Count Plot of **Grade** shows Grade B ,C, D are given more likely to charged off as compared to other grades
# - Count Plot of **Sub Grade** shows Grade B3, B5, B4, C1 , C2 have charged off more as compared to other grades
# - Count Plot of **term** shows 36 months loans are issued more so defaulted more compared to 60 months loan
# - Count Plot of **emp_length** shows employees with 10 years have more defaulted on loan compared with lesser experience
# - Count Plot of issue **year** shows the no. of defaulted loans have increased in the year 2011. The trend is increasing with the increase in the year
# - Count Plot of issue **month** shows there is increasing trend in number of defaults with increase in the months. More defaults can be seen in the month of October, November, December.
# - States CA, NY, FL and TX are the states for which maximum loan defaults
# - Maximum loan defaults are for debt consolidation, paying off Credit card, small business and 'other' reasons
# - Education and renewable energy is the least category where loans are defaulted
# - People who are in Rented house or Mortgate are more likely to Default- Funded amount is ranging from 5000 to 15000 USD
# - Installment amount is ranging from 200 to 400 USD
# - Interest rate range 9 to 13 is the range where maximum loans have been issued
# - 21 - 25% is the range where minimum loans have been issued
# **Observation**
# . Interest rate range 13% - 17% is the range with maximum loan defaults
# - 21 - 25% is the range where minimum loan defaults can be observed from the above plot
# **But considering above 2 plots we come to a conclusion that**
# > If we notice the % of defaults in each groups we can state that the interest range 21%-25% has maximum chances of defaults (as high as 44%)
# P.S. (Percent here means no. of charged off divided by the total no. of loan issues in a particular interest rate group)
# # Bivariate Analysis
# **Grade vs Loan Staus**
# Countplot of Grade vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="grade", hue="loan_status", data=df_final, palette="Set2")
ax.set_xlabel("Grade")
ax.set_ylabel("No of loans")
label(ax, 0.1)
ax.set_title("Grade vs Loan Status")
plt.show()
# **Observation**
# - The counts of Grade B, C and D are highest in Charged Off
# **Sub Grade vs Loan Staus**
# Countplot of Sub Grade vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="sub_grade", hue="loan_status", data=df_final)
ax.set_xlabel("Sub Grade")
ax.set_ylabel("No of loans")
ax.set_title("Sub Grade vs Loan Status")
plt.show()
# **Observation**
# - The counts of B3,B4,B5, C1,C2, D3 sub grades are higher in Charged Off
# **Term vs Loan Staus**
# Countplot of Term vs Loan Status
fig, ax = plt.subplots(figsize=(5, 4), dpi=100)
sns.countplot(x="term", hue="loan_status", data=df_final)
ax.set_xlabel("Term")
ax.set_ylabel("No of loans")
label(ax, 0.30)
ax.set_title("Term vs Loan Status(Charged off)")
# **Observation**
# - Though 36 month loan default is more compared to 60 month
# - What is observed is if we see the % of charged off in 36 months it would be 10% of the total loans of 36 months tenure.
# - Also if we calculate % charged off in 60 months it is 25% of the total loans issued with a 60 months which is much higher as compared to 36 month tenure so it is more likely for a 60 months loan issued to be a default
# **Employment Length vs Loan Staus**
# Countplot of Employment Length vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
sns.countplot(x="emp_length", hue="loan_status", data=df_final, palette="Set2")
ax.set_xlabel("Employment length")
ax.set_ylabel("No of loans")
ax.set_title("Employment length vs Loan Status")
label(ax, 0.001)
# **Observation**
# - Maximum loans are issued for people having 10 years of emp_lenth and hence the no. of defaulters are also high
# **Loan Issue Year vs Loan Status**
# Countplot of Loan Issue Year vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(x="year", data=df_final, hue="loan_status", palette="Set2")
ax.set_xlabel("Loan Issue Year")
ax.set_ylabel("No of loans")
label(ax, 0.01)
ax.set_title("Loan Issue Year (fully paid and charged off)")
plt.show()
# ### Observation
# - Plot of loan issue year shows maximum loans were taken in the year 2011
# - Also high loans are being Charged Off in 2011
# ### Loan Issue Month vs Loan Status
# Countplot of Loan Issue Month vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(x="month", data=df_final, hue="loan_status", palette="Set2")
ax.set_xlabel("Loan Issue Month")
ax.set_ylabel("No of loans")
label(ax, 0.01)
ax.set_title("Loan Issue Month (fully paid and charged off)")
plt.show()
# **Observation**
# - high loans are Charged Off for the loans issued in Sep - Dec months
# **Purpose vs Loan Staus**
# Countplot of Purpose vs Loan Status
fig, ax = plt.subplots(figsize=(12, 6), dpi=100)
plt.xticks(rotation=90)
sns.countplot(
x="purpose",
hue="loan_status",
data=df_final,
palette="Set2",
order=df_final[df_final.loan_status == "Charged Off"].purpose.value_counts().index,
)
ax.set_xlabel("Loan purpose")
ax.set_ylabel("No of loans")
ax.set_title("Loan purpose vs Loan Status")
label(ax, 0.001)
plt.show()
# **Observation**
# - Loans with purpose debt consolidation, other, credit crd and home improvement categories are unable to pay the loan compared with education / renewable energy
# - Also debt consolidation is the category where maximum loans are given.
# **Home Ownership vs Loan Staus**
# Hist of Home Ownership vs Loan Status
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
sns.histplot(
data=df_final,
x="home_ownership",
hue="loan_status",
multiple="dodge",
shrink=0.8,
palette="Set2",
)
ax.set_xlabel("Home ownership")
ax.set_ylabel("No of loans")
ax.set_title("Home ownership vs Loan Status")
label(ax, 0.01)
# **Observation**
# - Rent and Mortage category people take maximum loans and have higher chances of being defaulters of the loan compared with people in Own house
# **Verification vs Loan Staus**
# Histplot of Verification vs Loan Status
fig, ax = plt.subplots(figsize=(6, 4), dpi=100)
sns.histplot(
data=df_final,
x="verification_status",
hue="loan_status",
multiple="dodge",
shrink=0.8,
palette="Set2",
)
ax.set_xlabel("Verification status")
ax.set_ylabel("No of loans")
ax.set_title("Verfification status vs Loan Status")
label(ax, 0.01)
plt.show()
# **Observation**
# - Verified loans which are Charged Off is more compared to Not Verified
# **State vs Loan Staus**
# Countplot of State vs Loan Status
fig, ax = plt.subplots(figsize=(16, 8), dpi=100)
sns.countplot(x="addr_state", hue="loan_status", data=df_final)
ax.set_xlabel("State")
ax.set_ylabel("No of loans")
ax.set_title("State vs Loan Status = Charged Off")
plt.show()
# **Observation**
# - Borrowers from states CA, FL, NY and NJ have failed to pay the loan
# **Grade vs Loan status**
# Boxplot Grade vs Loan status
plt.figure(figsize=(12, 8))
ax = sns.boxplot(
y="grade",
x="loan_amnt",
data=df_final[df_final.loan_status == "Charged Off"],
palette="rainbow",
)
ax.set_title("Grade vs Loan Amount", fontsize=15, color="b")
ax.set_ylabel("Grade", fontsize=14, color="b")
ax.set_xlabel("Loan Amount", fontsize=14, color="b")
plt.show()
# **Observation**
# - Grade F, G and E are the three category which has higher charged off
# - The median of F and G is around 20k and Q3 at 25k
# - Grade A has a median at 7.5k and is at the least
# **DTI vs Loan status**
plt.figure(figsize=(12, 8))
ax = sns.boxplot(y="dti", x="loan_status", data=df_final, palette="rainbow")
ax.set_title("DTI for both Fully Paid and Charged off loans", fontsize=15, color="b")
ax.set_ylabel("DTI spread", fontsize=14, color="b")
ax.set_xlabel("Loan status", fontsize=14, color="b")
plt.show()
# **Observation**
# - DTI is not a significant factor because the median is close to each other for Fully Paid and Charged Off
# **amnt_to_inc_ratio vs Loan status**
#
df_final["amnt_to_inc_ratio"] = df_final.loan_amnt / df_final.annual_inc
df_final[["loan_amnt", "annual_inc", "amnt_to_inc_ratio"]]
plt.figure(figsize=(12, 8))
ax = sns.boxplot(
y="amnt_to_inc_ratio", x="loan_status", data=df_final, palette="rainbow"
)
ax.set_title(
"amnt_to_inc_ratio for both Fully Paid and Charged off loans",
fontsize=15,
color="b",
)
ax.set_ylabel("amnt_to_inc_ratio spread", fontsize=14, color="b")
ax.set_xlabel("Loan status", fontsize=14, color="b")
plt.show()
# **Observation**
# - amnt_to_inc_ratio is an indicator of bad loans, as the median and Q3 are higher for Charged Off loans
# **Summary**
# >- The plots above are for Charged Off loans. An applicant is more likely to default under the following conditions:
#
# >- Although the absolute number of defaults is higher for 36-month loans than for 60-month loans,
# >- about 10% of all 36-month loans end up Charged Off,
# >- while about 25% of all 60-month loans end up Charged Off, so a 60-month loan is much more likely to default (see the sketch after this list)
# >- Loans taken for debt consolidation, other, credit card and home improvement purposes are more likely to go unpaid than loans for education / renewable energy
# >- Debt consolidation is also the category in which the maximum number of loans is given
# >- People in the Rent and Mortgage categories take the most loans and have a higher chance of defaulting than people who own their house
# >- More Verified loans are Charged Off compared to Not Verified loans
# >- Borrowers from the states CA, FL, NY and NJ account for the most Charged Off loans
# >- Grades F, G and E are the three categories with the highest Charged Off loan amounts
# >- The medians of F and G are around 20k, with Q3 at 25k
# >- Grade A has the lowest median, at around 7.5k
# >- DTI is not a significant factor because the medians for Fully Paid and Charged Off are close to each other
# >- amnt_to_inc_ratio is an indicator of bad loans, as the median and Q3 are higher for Charged Off loans
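# A sketch of the term-wise charged-off percentages quoted in the summary above.
# Assumption: the loan tenure column is named "term"; adjust the column name if it differs in df_final.
term_default_rate = df_final.groupby("term")["loan_status"].apply(
    lambda s: (s == "Charged Off").mean() * 100
)
print(term_default_rate)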
# To identify the correlation between all variables of the dataset and see which variables negatively impact the loan_status variable
f, ax = plt.subplots(figsize=(11, 9))
corr = df_final.corr()
sns.heatmap(corr, vmax=0.3, annot=True)
| false | 1 | 8,611 | 1 | 8,630 | 8,611 |
||
129187974
|
<jupyter_start><jupyter_text>Data Science Salaries 2023 💸
Data Science Job Salaries Dataset contains 11 columns, each are:
1. work_year: The year the salary was paid.
2. experience_level: The experience level in the job during the year
3. employment_type: The type of employment for the role
4. job_title: The role worked in during the year.
5. salary: The total gross salary amount paid.
6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.
7. salaryinusd: The salary in USD
8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.
9. remote_ratio: The overall amount of work done remotely
10. company_location: The country of the employer's main office or contracting branch
11. company_size: The median number of people that worked for the company during the year
Kaggle dataset identifier: data-science-salaries-2023
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# !
import pandas as pd
import numpy as np
df = pd.read_csv("/kaggle/input/data-science-salaries-2023/ds_salaries.csv")
df.head()
df.tail()
df.sample(5)
df.info()
df.describe()
df.shape
df.isnull().sum()  # number of missing values per column
# ## EDA Exploratory Data Analysis
import matplotlib.pyplot as plt
import seaborn as sns
# ## Plotting
# ## Univariate Analysis
df.columns
# plotting matplotlib
plt.plot(df["work_year"])
plt.show()
# ## Plotting seaborn Displots
sns.displot(df["work_year"])
sns.displot(df["experience_level"])
sns.displot(df["employment_type"])
sns.displot(df["job_title"])
sns.displot(df["salary"])
sns.displot(df["salary_currency"])
sns.displot(df["salary_in_usd"])
sns.displot(df["employee_residence"])
sns.displot(df["remote_ratio"])
sns.displot(df["company_location"])
sns.displot(df["company_size"])
# ## Bivariate Analysis (2 variables)
plt.plot("work_year", "experience_level", data=df)
df.columns
sns.displot(data=df, x="work_year", hue="experience_level")
df.corr() # Calculate the correlation matrix
sns.pairplot(df) # Create pairwise scatter plots for multiple numerical columns
# ## Multivariate Analysis
sns.pairplot(df)
plt.show()
df.corr() # Calculate the correlation matrix for all numerical columns
sns.heatmap(
df.corr(), annot=True, cmap="coolwarm"
) # Visualize the correlation matrix as a heatmap
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/187/129187974.ipynb
|
data-science-salaries-2023
|
arnabchaki
|
[{"Id": 129187974, "ScriptId": 38403855, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11919395, "CreationDate": "05/11/2023 16:39:02", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 113.0, "LinesInsertedFromPrevious": 113.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
|
[{"Id": 185012738, "KernelVersionId": 129187974, "SourceDatasetVersionId": 5392837}]
|
[{"Id": 5392837, "DatasetId": 3125926, "DatasourceVersionId": 5466555, "CreatorUserId": 7428813, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "04/13/2023 09:55:16", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023 \ud83d\udcb8", "Slug": "data-science-salaries-2023", "Subtitle": "Salaries of Different Data Science Fields in the Data Science Domain", "Description": "Data Science Job Salaries Dataset contains 11 columns, each are:\n\n1. work_year: The year the salary was paid.\n2. experience_level: The experience level in the job during the year\n3. employment_type: The type of employment for the role\n4. job_title: The role worked in during the year.\n5. salary: The total gross salary amount paid.\n6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.\n7. salaryinusd: The salary in USD\n8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.\n9. remote_ratio: The overall amount of work done remotely\n10. company_location: The country of the employer's main office or contracting branch\n11. company_size: The median number of people that worked for the company during the year", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3125926, "CreatorUserId": 7428813, "OwnerUserId": 7428813.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5392837.0, "CurrentDatasourceVersionId": 5466555.0, "ForumId": 3189506, "Type": 2, "CreationDate": "04/13/2023 09:55:16", "LastActivityDate": "04/13/2023", "TotalViews": 234449, "TotalDownloads": 44330, "TotalVotes": 1244, "TotalKernels": 184}]
|
[{"Id": 7428813, "UserName": "arnabchaki", "DisplayName": "randomarnab", "RegisterDate": "05/16/2021", "PerformanceTier": 2}]
|
| false | 1 | 667 | 4 | 916 | 667 |
||
129187256
|
<jupyter_start><jupyter_text>Diabetes prediction dataset
The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.
Kaggle dataset identifier: diabetes-prediction-dataset
<jupyter_script>import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from xgboost import XGBClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import (
train_test_split,
cross_val_score,
StratifiedKFold,
GridSearchCV,
)
from xgboost import XGBClassifier
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
ConfusionMatrixDisplay,
classification_report,
precision_recall_curve,
PrecisionRecallDisplay,
)
from sklearn.pipeline import Pipeline
sns.set_style("whitegrid")
sns.set_palette("Set2")
pd.options.plotting.backend = "plotly"
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
print("-" * 25)
# Diabetes Detection with Machine Learning
# ## Introduction
# Diabetes is a chronic disease that affects millions of people worldwide. Early detection is crucial to prevent complications and manage the disease effectively. In this project, we will be building a machine learning model that can predict whether a person has diabetes based on several features such as age, gender, hypertension, heart disease, smoking history, BMI, HbA1c level, and blood glucose level.
# ## Dataset
# We will be using a dataset that contains information about patients, including their medical history and lab results. By analyzing this data, we can identify patterns and relationships between the features and diabetes risk and build a predictive model to classify individuals as either diabetic or non-diabetic.
# ## Goals
# Our ultimate goal is to create a machine learning model that accurately predicts diabetes status and can be used by healthcare professionals to identify at-risk individuals and provide early interventions. In this notebook, we will:
# * Perform exploratory data analysis (EDA) to gain insights into the data and identify any data quality issues.
# * Preprocess the data to prepare it for machine learning.
# * Build and train a machine learning model to predict diabetes status.
# * Evaluate the performance of our model using various performance metrics.
# Let's get started!
# ## Load the dataset
#
df = pd.read_csv(
"/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv"
)
# # Exploratory Data Analysis
# Before we begin building our machine learning model to detect diabetes, let's perform some **exploratory data analysis (EDA)** to better understand our dataset.
print(df.info())
# Dataset Overview
# Our dataset contains information about 100,000 patients, including their medical history and lab results. The dataset has 9 columns, which are:
# gender: The gender of the patient.
# age: The age of the patient in years.
# hypertension: Whether the patient has hypertension (1 = yes, 0 = no).
# heart_disease: Whether the patient has heart disease (1 = yes, 0 = no).
# smoking_history: The patient's smoking history (never, formerly, or currently).
# bmi: The patient's body mass index (BMI).
# HbA1c_level: The patient's HbA1c level.
# blood_glucose_level: The patient's fasting blood glucose level.
# diabetes: Whether the patient has diabetes (1 = yes, 0 = no).
# The data types of the columns include float64, int64, and object.
# We will need to convert some of the columns to a different data type, such as converting the gender and smoking_history columns to a categorical data type.
# Data Quality Issues
# Before we proceed with our analysis, we need to check for any data quality issues that may affect our model's performance. These issues can include missing data, duplicate data, or outliers.
# We can use pandas functions such as isnull(), duplicated(), and describe() to identify and handle these issues. We can also use visualization tools such as histograms and box plots to detect outliers and other anomalies.
print(df.isnull().sum())
print(df.duplicated().sum())
print(df.describe())
#
# From the first output, we can see that there are no missing values in any of the columns, since every count returned by isnull().sum() is 0.
# From the second output, we can see that there are some duplicate rows in the dataset, since duplicated().sum() returns a count greater than zero.
# From the third output, we can see that some columns have outliers, since the maximum value is significantly higher than the 75th percentile value. These columns are age, BMI, HbA1c_level, and blood_glucose_level.
# We need to handle these issues before proceeding with the analysis. We can drop the duplicate rows and handle the outliers by either removing them or imputing them with a more reasonable value.
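# A quick IQR-based outlier count per numeric column (a rough sketch, not a formal test)
num_cols = df.select_dtypes(include="number").columns
q1 = df[num_cols].quantile(0.25)
q3 = df[num_cols].quantile(0.75)
iqr = q3 - q1
outlier_mask = (df[num_cols] < q1 - 1.5 * iqr) | (df[num_cols] > q3 + 1.5 * iqr)
print(outlier_mask.sum())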
# Perform exploratory data analysis (EDA) to gain insights into the data
# Density plots
#
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Create subplots for each variable
fig = make_subplots(rows=2, cols=2)
# Plot the density plot of age
fig.add_trace(go.Histogram(x=df["age"], nbinsx=20, name="All Patients"), row=1, col=1)
fig.add_trace(
go.Histogram(x=df["age"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"),
row=1,
col=1,
)
fig.update_xaxes(title_text="Age", row=1, col=1)
fig.update_yaxes(title_text="Count", row=1, col=1)
# Plot the density plot of BMI
fig.add_trace(go.Histogram(x=df["bmi"], nbinsx=20, name="All Patients"), row=1, col=2)
fig.add_trace(
go.Histogram(x=df["bmi"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"),
row=1,
col=2,
)
fig.update_xaxes(title_text="BMI", row=1, col=2)
fig.update_yaxes(title_text="Count", row=1, col=2)
# Plot the density plot of blood glucose level
fig.add_trace(
go.Histogram(x=df["blood_glucose_level"], nbinsx=20, name="All Patients"),
row=2,
col=1,
)
fig.add_trace(
go.Histogram(
x=df["blood_glucose_level"][df["diabetes"] == 1],
nbinsx=20,
name="Diabetes Patients",
),
row=2,
col=1,
)
fig.update_xaxes(title_text="Blood Glucose Level", row=2, col=1)
fig.update_yaxes(title_text="Count", row=2, col=1)
# Plot the density plot of HbA1c level
fig.add_trace(
go.Histogram(x=df["HbA1c_level"], nbinsx=20, name="All Patients"), row=2, col=2
)
fig.add_trace(
go.Histogram(
x=df["HbA1c_level"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"
),
row=2,
col=2,
)
fig.update_xaxes(title_text="HbA1c Level", row=2, col=2)
fig.update_yaxes(title_text="Count", row=2, col=2)
fig.update_layout(title="Density Plots of Health Metrics", height=800)
fig.show()
#
# Violin plots
#
# Violin plots are a method of plotting numeric data and can be considered a combination of the box plot with a kernel density plot. In the violin plot, we can find the same information as in the box plots:
# median (a white dot on the violin plot)
# interquartile range (the black bar in the center of violin)
# the lower/upper adjacent values (the black lines stretched from the bar) — defined as first quartile — 1.5 IQR and third quartile + 1.5 IQR respectively. These values can be used in a simple outlier detection technique (Tukey’s fences) — observations lying outside of these “fences” can be considered outliers.
#
import plotly.express as px
fig = px.violin(
df,
y="age",
x="gender",
color="diabetes",
hover_data=["bmi"],
category_orders={"gender": ["Male", "Female"], "diabetes": ["Yes", "No"]},
)
fig.update_layout(title="Age Distribution by Gender and Diabetes Status")
fig.show()
fig = px.violin(
df,
y="age",
x="hypertension",
color="diabetes",
hover_data=["bmi"],
category_orders={"hypertension": [0, 1], "diabetes": ["Yes", "No"]},
)
fig.update_layout(title="Age Distribution by hypertension and Diabetes Status")
fig
fig.show()
fig = px.violin(
df,
y="age",
x="heart_disease",
color="diabetes",
hover_data=["bmi"],
category_orders={"heart_disease": [0, 1], "diabetes": ["Yes", "No"]},
)
fig.update_layout(title="Age Distribution by heart_disease and Diabetes Status")
fig.show()
fig = px.violin(
df,
y="age",
x="smoking_history",
color="diabetes",
hover_data=["bmi"],
category_orders={
"smoking_history": ["never", "former", "current"],
"diabetes": ["Yes", "No"],
},
)
fig.update_layout(title="Age Distribution by smoking_history and Diabetes Status")
fig.show()
fig = px.violin(
df,
y="age",
x="diabetes",
hover_data=["bmi"],
category_orders={"diabetes": ["Yes", "No"]},
)
fig.update_layout(title="Age Distribution by Diabetes Status")
fig.show()
# Boxplot
#
import plotly.graph_objects as go
fig = make_subplots(rows=1, cols=3)
fig.add_trace(go.Box(y=df["bmi"], name="BMI"), row=1, col=1)
fig.add_trace(go.Box(y=df["HbA1c_level"], name="HbA1c Level"), row=1, col=2)
fig.add_trace(
go.Box(y=df["blood_glucose_level"], name="Blood Glucose Level"), row=1, col=3
)
fig.update_layout(title="Box Plots for BMI, HbA1c Level, and Blood Glucose Level")
fig.show()
# # Data-preprocessing
Q1 = df["bmi"].quantile(0.25)
Q3 = df["bmi"].quantile(0.75)
IQR = Q3 - Q1
lower_whisker = df["bmi"].where(df["bmi"] >= Q1 - 1.5 * IQR).dropna().min()
upper_whisker = df["bmi"].where(df["bmi"] <= Q3 + 1.5 * IQR).dropna().max()
outliers = df[(df["bmi"] < lower_whisker) | (df["bmi"] > upper_whisker)]
print(outliers["bmi"])
# calculate the IQR
Q1 = np.percentile(df["bmi"], 25)
Q3 = np.percentile(df["bmi"], 75)
IQR = Q3 - Q1
# determine the upper and lower bounds for outliers
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
# remove the outliers
df = df[(df["bmi"] >= lower_bound) & (df["bmi"] <= upper_bound)]
# plot the boxplot for BMI
fig = px.box(df, y="bmi")
fig.update_layout(title="Box plot of BMI (without outliers)")
fig.show()
df["gender"] = df["gender"].astype("category")
df["smoking_history"] = df["smoking_history"].astype("category")
df["hypertension"] = df["hypertension"].astype(bool)
df["heart_disease"] = df["heart_disease"].astype(bool)
df["diabetes"] = df["diabetes"].astype(bool)
#
# Remove the Outliers
# In this code, we are calculating the interquartile range (IQR) of the 'bmi' column, which is a measure of the spread of the data. We then determine the lower and upper bounds for outliers using the IQR, and remove any rows where the 'bmi' value is outside these bounds.
# Outliers in the 'bmi' column are values that are significantly different from the majority of the data, and may indicate errors in data entry or measurement, or unusual characteristics of the individuals in the dataset. In this case, we have identified outliers with a 'bmi' value greater than 53.5, which are likely to be uncommon and potentially erroneous data points.
# By removing these outliers from the dataset, we can ensure that our analysis is based on a more representative sample of the data.
# Additionally, removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions. However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed. In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset.
#
# drop duplicates
df.drop_duplicates(inplace=True)
# check for duplicates again
print(df.duplicated().any())
#
# Duplicates
#
# Removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions.
# However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed.
# In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset.
#
import plotly.graph_objects as go
# count the number of people with diabetes equal to 1 and 0
diabetes_counts = df["diabetes"].value_counts()
# create the pie chart
fig = go.Figure(
data=[
go.Pie(
labels=["No Diabetes", "Diabetes"],
values=diabetes_counts,
hole=0.3,
)
]
)
# update the layout
fig.update_layout(title="Diabetes Distribution")
# show the plot
fig.show()
#
# The dataset is imbalanced
#
# The target variable 'diabetes' in this dataset is imbalanced, with a majority of 72,480 individuals labeled as 'No Diabetes' and only 5,843 labeled as 'Diabetes'. This means that the dataset contains significantly more examples of one class than the other, which can affect the performance of machine learning algorithms and result in biased predictions. To address this issue, we may need to use techniques such as oversampling, undersampling, or synthetic data generation to balance the dataset and improve the performance of our models.
#
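# One possible way to balance the classes is simple random oversampling of the minority class.
# A minimal sketch using sklearn.utils.resample; it is not applied below, where the imbalance is
# instead handled with XGBoost's scale_pos_weight.
from sklearn.utils import resample

minority = df[df["diabetes"] == 1]
majority = df[df["diabetes"] == 0]
minority_upsampled = resample(
    minority, replace=True, n_samples=len(majority), random_state=42
)
df_balanced = pd.concat([majority, minority_upsampled])
print(df_balanced["diabetes"].value_counts())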
import plotly.express as px
# Create heatmap figure
fig = px.imshow(df.corr(), color_continuous_scale="RdBu")
# Update axis labels and title
fig.update_layout(
xaxis_title="Features", yaxis_title="Features", title="Correlation Heatmap"
)
# Show the figure
fig.show()
# **Looking at the correlation matrix, we can see that the variables most strongly related to diabetes are:**
# Blood glucose level (correlation coefficient of 0.419558)
# HbA1c level (correlation coefficient of 0.400660)
# BMI (correlation coefficient of 0.214357)
# Age (correlation coefficient of 0.258008)
# Hypertension (correlation coefficient of 0.197823)
# This suggests that these variables may be important predictors of diabetes and should be considered when building predictive models or analyzing the relationship between diabetes and other variables in the data.
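# Ranking features by absolute correlation with the target (a sketch; same matrix as the heatmap above)
print(df.corr()["diabetes"].abs().sort_values(ascending=False))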
# Preprocess the data to prepare it for machine learning.
# ## one-hot encoding
X = df.drop("diabetes", axis=1)
y = df.diabetes
X = pd.get_dummies(X, columns=["smoking_history", "gender"], drop_first=True)
X = X.drop(
[
"gender_Other",
"smoking_history_not current",
"smoking_history_never",
"smoking_history_ever",
],
axis=1,
)
# Split the dataset
#
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
# # Build Model & Evaluate
# Build and train a machine learning model to predict diabetes status.
#
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import (
train_test_split,
cross_val_score,
StratifiedKFold,
GridSearchCV,
)
from xgboost import XGBClassifier
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
ConfusionMatrixDisplay,
classification_report,
precision_recall_curve,
PrecisionRecallDisplay,
)
from sklearn.pipeline import Pipeline
model = XGBClassifier(random_state=42)
model.fit(X_train, y_train)
cv = 5
weights = [2, 3, 25, 50, 100]
# Evaluate the performance of our model.
#
def report_model(model):
y_train_repo = model.predict(X_train)
y_test_repo = model.predict(X_test)
print(f"the accuracy on train set {accuracy_score(y_train, y_train_repo)}")
print(f"the accuracy on test set {accuracy_score(y_test, y_test_repo)}")
print()
print(classification_report(y_test, y_test_repo))
ConfusionMatrixDisplay(confusion_matrix(y_test, y_test_repo)).plot()
plt.show()
report_model(model)
weights = [2, 3, 25, 50, 100]
param_grid = dict(scale_pos_weight=weights)
grid = GridSearchCV(XGBClassifier(), param_grid=param_grid, cv=cv, scoring="recall")
grid.fit(X_train, y_train)
print(f"best parameters: {grid.best_params_}")
print(f"best scores: {grid.best_score_}")
# Handle imbalanced datasets
# Setting the scale_pos_weight hyperparameter to a value greater than 1 helps the algorithm to focus more on the positive class, and improves the recall (true positive rate) of the model, while possibly sacrificing some precision (positive predictive value). It is important to tune this hyperparameter carefully to avoid overfitting the positive class.
#
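# A common heuristic for scale_pos_weight is the ratio of negative to positive samples in the
# training set (a sketch; the model below simply uses scale_pos_weight=5 as a manual choice).
neg_count = (y_train == 0).sum()
pos_count = (y_train == 1).sum()
print("suggested scale_pos_weight:", neg_count / pos_count)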
model2 = XGBClassifier(
n_estimators=100, max_depth=5, scale_pos_weight=5, random_state=42
)
model2.fit(X_train, y_train)
report_model(model2)
import pickle
# save the model to a file
with open("diabetes_XGB.pkl", "wb") as f:
pickle.dump(model2, f)
import numpy as np
# load the model from the file
with open("/kaggle/working/diabetes_XGB.pkl", "rb") as f:
test = pickle.load(f)
def diabetes(
age,
hypertension,
heart_disease,
bmi,
HbA1c_level,
blood_glucose_level,
smoking_history_current,
smoking_history_former,
gender_Male,
):
classes = ["Negative", "Postive"]
input_array = np.array(
[
[
age,
hypertension,
heart_disease,
bmi,
HbA1c_level,
blood_glucose_level,
smoking_history_current,
smoking_history_former,
gender_Male,
]
]
)
pred = test.predict(input_array)
class_pred = classes[pred[0]]
return class_pred
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/187/129187256.ipynb
|
diabetes-prediction-dataset
|
iammustafatz
|
[{"Id": 129187256, "ScriptId": 38311676, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12844285, "CreationDate": "05/11/2023 16:30:57", "VersionNumber": 5.0, "Title": "Detecting Diabetes EDA and XGB\ud83e\uddd0\ud83d\udcca\ud83d\udcc9", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 411.0, "LinesInsertedFromPrevious": 31.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 380.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
|
[{"Id": 185011574, "KernelVersionId": 129187256, "SourceDatasetVersionId": 5344155}]
|
[{"Id": 5344155, "DatasetId": 3102947, "DatasourceVersionId": 5417553, "CreatorUserId": 11427441, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/08/2023 06:11:45", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "Slug": "diabetes-prediction-dataset", "Subtitle": "A Comprehensive Dataset for Predicting Diabetes with Medical & Demographic Data", "Description": "The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3102947, "CreatorUserId": 11427441, "OwnerUserId": 11427441.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5344155.0, "CurrentDatasourceVersionId": 5417553.0, "ForumId": 3166206, "Type": 2, "CreationDate": "04/08/2023 06:11:45", "LastActivityDate": "04/08/2023", "TotalViews": 127619, "TotalDownloads": 24886, "TotalVotes": 309, "TotalKernels": 120}]
|
[{"Id": 11427441, "UserName": "iammustafatz", "DisplayName": "Mohammed Mustafa", "RegisterDate": "08/29/2022", "PerformanceTier": 0}]
|
| false | 1 | 5,183 | 7 | 5,374 | 5,183 |
||
129013037
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel("/kaggle/input/products/ABBREV_with_CLASS.xlsx")
train
train.describe()
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data["Protein_(g)"])
Q1 = train["Protein_(g)"].quantile(0.25)
Q3 = train["Protein_(g)"].quantile(0.75)
IQR = Q3 - Q1
train = train[
(train["Protein_(g)"] >= Q1 - 1.5 * IQR) & (train["Protein_(g)"] <= Q3 + 1.5 * IQR)
]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data["Protein_(g)"])
train.info()
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train["Shrt_Desc"] = labelencoder_Shrt_Desc.fit_transform(train["Shrt_Desc"])
labelencoder_GmWt_Desc1 = LabelEncoder()
train["GmWt_Desc1"] = labelencoder_GmWt_Desc1.fit_transform(train["GmWt_Desc1"])
labelencoder_GmWt_Desc2 = LabelEncoder()
train["GmWt_Desc2"] = labelencoder_GmWt_Desc2.fit_transform(train["GmWt_Desc2"])
train.info()
train.isnull().sum()
train["Ash_(g)"] = train["Ash_(g)"].fillna(train["Ash_(g)"].median())
train["Water_(g)"] = train["Water_(g)"].fillna(train["Water_(g)"].median())
train["Fiber_TD_(g)"] = train["Fiber_TD_(g)"].fillna(train["Fiber_TD_(g)"].median())
train["Sugar_Tot_(g)"] = train["Sugar_Tot_(g)"].fillna(train["Sugar_Tot_(g)"].median())
train["Calcium_(mg)"] = train["Calcium_(mg)"].fillna(train["Calcium_(mg)"].median())
train["Iron_(mg)"] = train["Iron_(mg)"].fillna(train["Iron_(mg)"].median())
train["Magnesium_(mg)"] = train["Magnesium_(mg)"].fillna(
train["Magnesium_(mg)"].median()
)
train["Phosphorus_(mg)"] = train["Phosphorus_(mg)"].fillna(
train["Phosphorus_(mg)"].median()
)
train["Potassium_(mg)"] = train["Potassium_(mg)"].fillna(
train["Potassium_(mg)"].median()
)
train["Sodium_(mg)"] = train["Sodium_(mg)"].fillna(train["Sodium_(mg)"].median())
train["Zinc_(mg)"] = train["Zinc_(mg)"].fillna(train["Zinc_(mg)"].median())
train["Copper_mg)"] = train["Copper_mg)"].fillna(train["Copper_mg)"].median())
train["Manganese_(mg)"] = train["Manganese_(mg)"].fillna(
train["Manganese_(mg)"].median()
)
train["Selenium_(µg)"] = train["Selenium_(µg)"].fillna(train["Selenium_(µg)"].median())
train["Vit_C_(mg)"] = train["Vit_C_(mg)"].fillna(train["Vit_C_(mg)"].median())
train["Thiamin_(mg)"] = train["Thiamin_(mg)"].fillna(train["Thiamin_(mg)"].median())
train["Riboflavin_(mg)"] = train["Riboflavin_(mg)"].fillna(
train["Riboflavin_(mg)"].median()
)
train["Niacin_(mg)"] = train["Niacin_(mg)"].fillna(train["Niacin_(mg)"].median())
train["Panto_Acid_mg)"] = train["Panto_Acid_mg)"].fillna(
train["Panto_Acid_mg)"].median()
)
train["Vit_B6_(mg)"] = train["Vit_B6_(mg)"].fillna(train["Vit_B6_(mg)"].median())
train["Folate_Tot_(µg)"] = train["Folate_Tot_(µg)"].fillna(
train["Folate_Tot_(µg)"].median()
)
train["Folic_Acid_(µg)"] = train["Folic_Acid_(µg)"].fillna(
train["Folic_Acid_(µg)"].median()
)
train["Food_Folate_(µg)"] = train["Food_Folate_(µg)"].fillna(
train["Food_Folate_(µg)"].median()
)
train["Folate_DFE_(µg)"] = train["Folate_DFE_(µg)"].fillna(
train["Folate_DFE_(µg)"].median()
)
train["Choline_Tot_ (mg)"] = train["Choline_Tot_ (mg)"].fillna(
train["Choline_Tot_ (mg)"].median()
)
train["Vit_B12_(µg)"] = train["Vit_B12_(µg)"].fillna(train["Vit_B12_(µg)"].median())
train["Vit_A_IU"] = train["Vit_A_IU"].fillna(train["Vit_A_IU"].median())
train["Vit_A_RAE"] = train["Vit_A_RAE"].fillna(train["Vit_A_RAE"].median())
train["Retinol_(µg)"] = train["Retinol_(µg)"].fillna(train["Retinol_(µg)"].median())
train["Alpha_Carot_(µg)"] = train["Alpha_Carot_(µg)"].fillna(
train["Alpha_Carot_(µg)"].median()
)
train["Beta_Carot_(µg)"] = train["Beta_Carot_(µg)"].fillna(
train["Beta_Carot_(µg)"].median()
)
train["Beta_Crypt_(µg)"] = train["Beta_Crypt_(µg)"].fillna(
train["Beta_Crypt_(µg)"].median()
)
train["Lycopene_(µg)"] = train["Lycopene_(µg)"].fillna(train["Lycopene_(µg)"].median())
train["Lut+Zea_ (µg)"] = train["Lut+Zea_ (µg)"].fillna(train["Lut+Zea_ (µg)"].median())
train["Vit_E_(mg)"] = train["Vit_E_(mg)"].fillna(train["Vit_E_(mg)"].median())
train["Vit_D_µg"] = train["Vit_D_µg"].fillna(train["Vit_D_µg"].median())
train["Vit_D_IU"] = train["Vit_D_IU"].fillna(train["Vit_D_IU"].median())
train["Vit_K_(µg)"] = train["Vit_K_(µg)"].fillna(train["Vit_K_(µg)"].median())
train["FA_Sat_(g)"] = train["FA_Sat_(g)"].fillna(train["FA_Sat_(g)"].median())
train["FA_Mono_(g)"] = train["FA_Mono_(g)"].fillna(train["FA_Mono_(g)"].median())
train["FA_Poly_(g)"] = train["FA_Poly_(g)"].fillna(train["FA_Poly_(g)"].median())
train["Cholestrl_(mg)"] = train["Cholestrl_(mg)"].fillna(
train["Cholestrl_(mg)"].median()
)
train["GmWt_1"] = train["GmWt_1"].fillna(train["GmWt_1"].median())
train["GmWt_2"] = train["GmWt_2"].fillna(train["GmWt_2"].median())
train["Refuse_Pct"] = train["Refuse_Pct"].fillna(train["Refuse_Pct"].median())
# k-nearest neighbors (KNN) method
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Create two clusters of points that are visually well separated from each other.
# (Note: the make_blobs call below overwrites the X and y taken from the dataset, so this KNN demo runs on synthetic data.)
from sklearn.datasets import make_blobs
centers = [[1, 1], [-1, -1]]
X = train.drop("CLASS", axis=1)
y = train["CLASS"]
y
X, y = make_blobs(n_samples=100, centers=centers, cluster_std=0.6, random_state=0)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train, palette="viridis")
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="rocket_r")
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# Visualization with DecisionBoundaryDisplay
#!pip install scikit-learn --upgrade --no-deps
from sklearn.inspection import DecisionBoundaryDisplay
disp = DecisionBoundaryDisplay.from_estimator(
clf, X_test, response_method="predict", alpha=0.7
)
disp.ax_.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor="yellow")
# Visualization with mlxtend plot_decision_regions
#!pip install mlxtend --upgrade --no-deps
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
# By default the classifier uses the five nearest neighbors. Let's reduce this value to two and look at the result.
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
# Increase the number of point clusters to three.
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop("CLASS", axis=1)
y = train["CLASS"]
y
X, y = make_blobs(n_samples=750, centers=centers, cluster_std=0.6, random_state=0)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=53
)
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train)
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="tab10")
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(k, score)
scores.append(score)
plt.figure(figsize=(12, 3))
sns.lineplot(x=list(map(str, range(1, 11))), y=scores, marker="o", markersize=10)
# Let's look at the distance metrics between points. By default the l2 metric is used, which corresponds to the Euclidean distance. Let's switch it to l1, the Manhattan distance.
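# A tiny worked example of the two metrics for points a = (1, 2) and b = (4, 6):
a, b = np.array([1, 2]), np.array([4, 6])
print("l1 (Manhattan):", np.abs(a - b).sum())  # |1 - 4| + |2 - 6| = 7
print("l2 (Euclidean):", np.sqrt(((a - b) ** 2).sum()))  # sqrt(3**2 + 4**2) = 5.0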
clf = KNeighborsClassifier(metric="manhattan")
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# GridSearchCV
from sklearn.model_selection import GridSearchCV
params = {"n_neighbors": range(1, 30), "metric": ["l1", "l2"]}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test)
best_clf.best_params_
from sklearn.metrics import classification_report
y_clf = clf.predict(X_test)
print(classification_report(y_test, y_clf))
y_best_clf = best_clf.predict(X_test)
print(classification_report(y_test, y_best_clf))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/013/129013037.ipynb
| null | null |
[{"Id": 129013037, "ScriptId": 38348516, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6692497, "CreationDate": "05/10/2023 10:04:24", "VersionNumber": 1.0, "Title": "Task3_KNN", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 213.0, "LinesInsertedFromPrevious": 213.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_excel("/kaggle/input/products/ABBREV_with_CLASS.xlsx")
train
train.describe()
train.isnull().sum()
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data["Protein_(g)"])
Q1 = train["Protein_(g)"].quantile(0.25)
Q3 = train["Protein_(g)"].quantile(0.75)
IQR = Q3 - Q1
train = train[
(train["Protein_(g)"] >= Q1 - 1.5 * IQR) & (train["Protein_(g)"] <= Q3 + 1.5 * IQR)
]
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
data = train
sns.boxplot(x=data["Protein_(g)"])
train.info()
from sklearn.preprocessing import LabelEncoder
labelencoder_Shrt_Desc = LabelEncoder()
train["Shrt_Desc"] = labelencoder_Shrt_Desc.fit_transform(train["Shrt_Desc"])
labelencoder_GmWt_Desc1 = LabelEncoder()
train["GmWt_Desc1"] = labelencoder_GmWt_Desc1.fit_transform(train["GmWt_Desc1"])
labelencoder_GmWt_Desc2 = LabelEncoder()
train["GmWt_Desc2"] = labelencoder_GmWt_Desc2.fit_transform(train["GmWt_Desc2"])
train.info()
train.isnull().sum()
train["Ash_(g)"] = train["Ash_(g)"].fillna(train["Ash_(g)"].median())
train["Water_(g)"] = train["Water_(g)"].fillna(train["Water_(g)"].median())
train["Fiber_TD_(g)"] = train["Fiber_TD_(g)"].fillna(train["Fiber_TD_(g)"].median())
train["Sugar_Tot_(g)"] = train["Sugar_Tot_(g)"].fillna(train["Sugar_Tot_(g)"].median())
train["Calcium_(mg)"] = train["Calcium_(mg)"].fillna(train["Calcium_(mg)"].median())
train["Iron_(mg)"] = train["Iron_(mg)"].fillna(train["Iron_(mg)"].median())
train["Magnesium_(mg)"] = train["Magnesium_(mg)"].fillna(
train["Magnesium_(mg)"].median()
)
train["Phosphorus_(mg)"] = train["Phosphorus_(mg)"].fillna(
train["Phosphorus_(mg)"].median()
)
train["Potassium_(mg)"] = train["Potassium_(mg)"].fillna(
train["Potassium_(mg)"].median()
)
train["Sodium_(mg)"] = train["Sodium_(mg)"].fillna(train["Sodium_(mg)"].median())
train["Zinc_(mg)"] = train["Zinc_(mg)"].fillna(train["Zinc_(mg)"].median())
train["Copper_mg)"] = train["Copper_mg)"].fillna(train["Copper_mg)"].median())
train["Manganese_(mg)"] = train["Manganese_(mg)"].fillna(
train["Manganese_(mg)"].median()
)
train["Selenium_(µg)"] = train["Selenium_(µg)"].fillna(train["Selenium_(µg)"].median())
train["Vit_C_(mg)"] = train["Vit_C_(mg)"].fillna(train["Vit_C_(mg)"].median())
train["Thiamin_(mg)"] = train["Thiamin_(mg)"].fillna(train["Thiamin_(mg)"].median())
train["Riboflavin_(mg)"] = train["Riboflavin_(mg)"].fillna(
train["Riboflavin_(mg)"].median()
)
train["Niacin_(mg)"] = train["Niacin_(mg)"].fillna(train["Niacin_(mg)"].median())
train["Panto_Acid_mg)"] = train["Panto_Acid_mg)"].fillna(
train["Panto_Acid_mg)"].median()
)
train["Vit_B6_(mg)"] = train["Vit_B6_(mg)"].fillna(train["Vit_B6_(mg)"].median())
train["Folate_Tot_(µg)"] = train["Folate_Tot_(µg)"].fillna(
train["Folate_Tot_(µg)"].median()
)
train["Folic_Acid_(µg)"] = train["Folic_Acid_(µg)"].fillna(
train["Folic_Acid_(µg)"].median()
)
train["Food_Folate_(µg)"] = train["Food_Folate_(µg)"].fillna(
train["Food_Folate_(µg)"].median()
)
train["Folate_DFE_(µg)"] = train["Folate_DFE_(µg)"].fillna(
train["Folate_DFE_(µg)"].median()
)
train["Choline_Tot_ (mg)"] = train["Choline_Tot_ (mg)"].fillna(
train["Choline_Tot_ (mg)"].median()
)
train["Vit_B12_(µg)"] = train["Vit_B12_(µg)"].fillna(train["Vit_B12_(µg)"].median())
train["Vit_A_IU"] = train["Vit_A_IU"].fillna(train["Vit_A_IU"].median())
train["Vit_A_RAE"] = train["Vit_A_RAE"].fillna(train["Vit_A_RAE"].median())
train["Retinol_(µg)"] = train["Retinol_(µg)"].fillna(train["Retinol_(µg)"].median())
train["Alpha_Carot_(µg)"] = train["Alpha_Carot_(µg)"].fillna(
train["Alpha_Carot_(µg)"].median()
)
train["Beta_Carot_(µg)"] = train["Beta_Carot_(µg)"].fillna(
train["Beta_Carot_(µg)"].median()
)
train["Beta_Crypt_(µg)"] = train["Beta_Crypt_(µg)"].fillna(
train["Beta_Crypt_(µg)"].median()
)
train["Lycopene_(µg)"] = train["Lycopene_(µg)"].fillna(train["Lycopene_(µg)"].median())
train["Lut+Zea_ (µg)"] = train["Lut+Zea_ (µg)"].fillna(train["Lut+Zea_ (µg)"].median())
train["Vit_E_(mg)"] = train["Vit_E_(mg)"].fillna(train["Vit_E_(mg)"].median())
train["Vit_D_µg"] = train["Vit_D_µg"].fillna(train["Vit_D_µg"].median())
train["Vit_D_IU"] = train["Vit_D_IU"].fillna(train["Vit_D_IU"].median())
train["Vit_K_(µg)"] = train["Vit_K_(µg)"].fillna(train["Vit_K_(µg)"].median())
train["FA_Sat_(g)"] = train["FA_Sat_(g)"].fillna(train["FA_Sat_(g)"].median())
train["FA_Mono_(g)"] = train["FA_Mono_(g)"].fillna(train["FA_Mono_(g)"].median())
train["FA_Poly_(g)"] = train["FA_Poly_(g)"].fillna(train["FA_Poly_(g)"].median())
train["Cholestrl_(mg)"] = train["Cholestrl_(mg)"].fillna(
train["Cholestrl_(mg)"].median()
)
train["GmWt_1"] = train["GmWt_1"].fillna(train["GmWt_1"].median())
train["GmWt_2"] = train["GmWt_2"].fillna(train["GmWt_2"].median())
train["Refuse_Pct"] = train["Refuse_Pct"].fillna(train["Refuse_Pct"].median())
# The k-means method
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Create two sets of points that are visually far apart.
from sklearn.datasets import make_blobs
centers = [[1, 1], [-1, -1]]
X = train.drop("CLASS", axis=1)
y = train["CLASS"]
y
# Note: the synthetic blobs generated below overwrite the X, y taken from the dataset above.
X, y = make_blobs(n_samples=100, centers=centers, cluster_std=0.6, random_state=0)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train, palette="viridis")
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="rocket_r")
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# Visualization with DecisionBoundaryDisplay
#!pip install scikit-learn --upgrade --no-deps
from sklearn.inspection import DecisionBoundaryDisplay
disp = DecisionBoundaryDisplay.from_estimator(
clf, X_test, response_method="predict", alpha=0.7
)
disp.ax_.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor="yellow")
# Visualization with mlxtend's plot_decision_regions
#!pip install mlxtend --upgrade --no-deps
from mlxtend.plotting import plot_decision_regions
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
# By default the classifier uses the five nearest neighbors. Let's reduce this value to two and look at the result.
clf = KNeighborsClassifier(2)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
# Increase the number of point clusters to three.
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop("CLASS", axis=1)
y = train["CLASS"]
y
# Note: as before, the synthetic blobs generated below overwrite the X, y taken from the dataset.
X, y = make_blobs(n_samples=750, centers=centers, cluster_std=0.6, random_state=0)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=53
)
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train)
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="tab10")
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
scores = []
for k in range(1, 11):
clf = KNeighborsClassifier(k)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(k, score)
scores.append(score)
plt.figure(figsize=(12, 3))
sns.lineplot(x=list(map(str, range(1, 11))), y=scores, marker="o", markersize=10)
# Consider the distance metric between points. By default the l2 metric (Euclidean distance) is used. Let's switch it to l1 - the Manhattan distance.
clf = KNeighborsClassifier(metric="manhattan")
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# GridSearchCV
from sklearn.model_selection import GridSearchCV
params = {"n_neighbors": range(1, 30), "metric": ["l1", "l2"]}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test)
best_clf.best_params_
from sklearn.metrics import classification_report
y_clf = clf.predict(X_test)
print(classification_report(y_test, y_clf))
y_best_clf = best_clf.predict(X_test)
print(classification_report(y_test, y_best_clf))
| false | 0 | 3,439 | 0 | 3,439 | 3,439 |
||
129114823
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
pass
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
with open("submission.csv", "w") as creating_new_csv_file:
pass
import shutil
src = "/kaggle/input/quicksubmission/submission.csv"
dst = "/kaggle/working/submission.csv"
shutil.copyfile(src, dst)
# 2nd option
# shutil.copy(src, dst) # dst can be a folder; use shutil.copy2() to preserve timestamp
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114823.ipynb
| null | null |
[{"Id": 129114823, "ScriptId": 38383537, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4246326, "CreationDate": "05/11/2023 05:50:27", "VersionNumber": 2.0, "Title": "Simple test notebookd80b0e7c2f", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 31.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 31.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
pass
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# y files to /kaggle/temp/, but they won't be saved outside of the current session
with open("submission.csv", "w") as creating_new_csv_file:
pass
import shutil
src = "/kaggle/input/quicksubmission/submission.csv"
dst = "/kaggle/working/submission.csv"
shutil.copyfile(src, dst)
# 2nd option
# shutil.copy(src, dst) # dst can be a folder; use shutil.copy2() to preserve timestamp
| false | 0 | 278 | 0 | 278 | 278 |
||
129114567
|
<jupyter_start><jupyter_text>Red Wine Quality
### Context
The two datasets are related to red and white variants of the Portuguese "Vinho Verde" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.).
These datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones).
---
*This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)*
### Content
For more information, read [Cortez et al., 2009].<br>
Input variables (based on physicochemical tests):<br>
1 - fixed acidity <br>
2 - volatile acidity <br>
3 - citric acid <br>
4 - residual sugar <br>
5 - chlorides <br>
6 - free sulfur dioxide <br>
7 - total sulfur dioxide <br>
8 - density <br>
9 - pH <br>
10 - sulphates <br>
11 - alcohol <br>
Output variable (based on sensory data): <br>
12 - quality (score between 0 and 10) <br>
### Tips
What might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'.
This allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value.
Without doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm)
**KNIME** is a great tool (GUI) that can be used for this.<br>
1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br>
2- File Reader to 'Rule Engine Node' to turn the 10 point scale into a dichotomous variable (good wine and the rest); the rule to put in the rule engine is something like this (a pandas sketch of the same cutoff is given after this list):<br>
 - **$quality$ > 6.5 => "good"**<br>
 - **TRUE => "bad"** <br>
3- Rule Engine Node output to input of Column Filter node to filter out your original 10-point feature (this prevents leakage)<br>
4- Column Filter Node output to input of Partitioning Node (your standard train/test split, e.g. 75%/25%, choose 'random' or 'stratified')<br>
5- Partitioning Node train data split output to input of Decision Tree Learner node, and <br>
6- Partitioning Node test data split output to input of Decision Tree Predictor node<br>
7- Decision Tree Learner node output to input of Decision Tree Predictor node<br>
8- Decision Tree output to input of ROC node (here you can evaluate your model based on the AUC value)<br>
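For readers who prefer pandas to KNIME, a minimal sketch of the same cutoff idea (assuming the file name `winequality-red.csv` and the `quality` column described above):

```python
import pandas as pd

df = pd.read_csv("winequality-red.csv")
# Dichotomize the 10-point quality score: > 6.5 -> "good", otherwise "bad"
df["label"] = (df["quality"] > 6.5).map({True: "good", False: "bad"})
# Drop the original score so it does not leak into the features
X = df.drop(columns=["quality", "label"])
y = df["label"]
```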
### Inspiration
Use machine learning to determine which physiochemical properties make a wine 'good'!
Kaggle dataset identifier: red-wine-quality-cortez-et-al-2009
<jupyter_script>import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from numpy.linalg import eig
# ### Loading the dataset
df = pd.read_csv("../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv")
df.head()
df.shape
# There are 1599 rows and 12 columns
df.dtypes
# ### Null Values
df.isnull().sum()
# There are no null values noted in the dataset
# ### Five-Point Summary
df.describe().T
sns.pairplot(data=df, diag_kind="kde")
plt.show()
# From the pair plot we can see that the numerical features on the diagonal are mostly right-skewed
plt.figure(figsize=(15, 7))
mask = np.triu(np.ones_like(df.corr()))
sns.heatmap(df.corr(), annot=True, mask=mask)
plt.show()
# Only a few variable pairs show high multicollinearity: density and fixed acidity (0.67), and citric acid and fixed acidity (0.67).
# ### Determining Outliers
for i in df:
sns.boxplot(df[i])
plt.show()
# From the box plots it can be seen that there are large outliers in multiple features such as residual sugar, chlorides, density, etc. We will use the IQR method to remove the outliers.
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
df = df[~((df < (Q1 - 3.5 * IQR)) | (df > (Q3 + 3.5 * IQR))).any(axis=1)]
df.shape
# After IQR technique the shape of the data has changed to 1452 rows and 12 columns
df.skew()
df.drop("quality", axis=1, inplace=True)
# ### Scaling the data
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
df_scaled = ss.fit_transform(df)
# Note: this wraps the original (unscaled) df, so the array returned by StandardScaler above is
# effectively discarded; to actually use the scaled values, wrap that array instead of df.
df_scaled = pd.DataFrame(df, columns=df.columns)
df_scaled.shape
# ### K-Means
wcss = []
for k in range(1, 10):
kmeans = KMeans(n_clusters=k)
kmeans.fit(df_scaled)
wcss.append(kmeans.inertia_)
print(wcss)
plt.figure(figsize=(12, 6))
plt.plot(range(1, 10), wcss)
plt.xlabel("Number of Clusters")
plt.title("Plot for Optimal Number of Clusters")
plt.ylabel("WCSS")
plt.show()
# From the above elbow plot we can see that the optimal value of k can be considered as 2.
KMeans3 = KMeans(n_clusters=2, random_state=10)
KMeans3.fit(df_scaled)
labels = KMeans3.predict(df_scaled)  # use the final fitted model, not the leftover k=9 model from the elbow loop
KMeans3.cluster_centers_
KMeans3.labels_
df_Kmeans = df_scaled.copy()
df_Kmeans["cluster"] = KMeans3.labels_
df_Kmeans["cluster"].value_counts()
sns.countplot(df_Kmeans["cluster"])
plt.title("Cluster Size")
plt.xlabel("Number of Clusters")
plt.ylabel("Number of Observations")
plt.show()
# There are 2 clusters formed and the segregation can be seen from the above plot
# ### Silhouette_score
from sklearn.metrics import silhouette_score
c = [2, 3, 4, 5, 6]
for i in c:
cluster = KMeans(n_clusters=i)
cluster.fit(df_scaled)
score = silhouette_score(df_scaled, cluster.labels_, random_state=10)
print("score", i, "", score)
from yellowbrick.cluster import SilhouetteVisualizer
c = [2, 3, 4, 5]
for i in c:
cluster = KMeans(n_clusters=i)
cluster.fit(df_scaled)
score = silhouette_score(df_scaled, cluster.labels_, random_state=10)
print("score", i, "", score)
Visualizer = SilhouetteVisualizer(cluster, colors="yellowbrick")
Visualizer.fit(df_scaled)
Visualizer.show()
# From silhouette_score method we can see that the silhouette_score is maximum for k=2 hence K=2 is considered.
# ### Agglomerative clustering
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
link_mat = linkage(df_scaled, method="ward")
link_mat
c, cd = cophenet(link_mat, pdist(df_scaled))
c
# The cophenetic coefficient measures how faithfully the dendrogram preserves the original pairwise distances; a value close to 1 indicates the hierarchical clustering is quite good.
dendrogram(link_mat)
plt.show()
# From the above dendrogram the optimal number of clusters is 2, since cutting the tree above a height of about 700 leaves two branches.
df_aggo = df_scaled.copy()
clusters = AgglomerativeClustering(n_clusters=2, linkage="ward")
clusters.fit(df_scaled)
df_aggo["cluster"] = clusters.labels_
df_aggo["cluster"].value_counts()
sns.countplot(df_aggo["cluster"])
plt.title("Cluster Size")
plt.xlabel("Number of Clusters")
plt.ylabel("Number of Observations")
plt.show()
# Agglomerative clustering also forms 2 clusters, and their segregation can be seen in the above plot.
# ## PCA
pca = PCA(n_components=0.95)
pca = pca.fit(df_scaled)
print("Eigen Vector :\n", pca.components_)
print()
print("Eigen Values :\n", pca.explained_variance_)
print()
print("Variance :\n", pca.explained_variance_ratio_)
# The highest variance is explained by PC1, about 94%.
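# A quick check of the cumulative variance captured by the retained components
# (a sketch using the pca object fitted above):
print(pca.explained_variance_ratio_.cumsum())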
df_pca = pd.DataFrame(pca.transform(df_scaled), columns=["PC1", "PC2"])
df_pca.head()
sns.heatmap(df_pca.corr(), annot=True)
# From the above heat map we can see that there is no correlation between the principal components after applying PCA
# ### Kmeans
df_kmeans = df_pca.copy()
df_kmeans["group"] = KMeans3.labels_
df_kmeans.head()
df_kmeans["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_kmeans, hue="group")
plt.show()
# ### AgglomerativeClustering
df_agg = df_pca.copy()
df_agg["group"] = clusters.labels_
df_agg.head()
df_agg["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_agg, hue="group")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114567.ipynb
|
red-wine-quality-cortez-et-al-2009
| null |
[{"Id": 129114567, "ScriptId": 23893640, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8101573, "CreationDate": "05/11/2023 05:47:43", "VersionNumber": 1.0, "Title": "Red Wine Clustering", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 219.0, "LinesInsertedFromPrevious": 219.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 184883344, "KernelVersionId": 129114567, "SourceDatasetVersionId": 8204}]
|
[{"Id": 8204, "DatasetId": 4458, "DatasourceVersionId": 8204, "CreatorUserId": 1132983, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "11/27/2017 23:41:08", "VersionNumber": 2.0, "Title": "Red Wine Quality", "Slug": "red-wine-quality-cortez-et-al-2009", "Subtitle": "Simple and clean practice dataset for regression or classification modelling", "Description": "### Context\n\nThe two datasets are related to red and white variants of the Portuguese \"Vinho Verde\" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). \n\nThese datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones). \n\n---\n*This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)*\n\n\n### Content\n\nFor more information, read [Cortez et al., 2009].<br>\nInput variables (based on physicochemical tests):<br>\n1 - fixed acidity <br>\n2 - volatile acidity <br>\n3 - citric acid <br>\n4 - residual sugar <br>\n5 - chlorides <br>\n6 - free sulfur dioxide <br> \n7 - total sulfur dioxide <br>\n8 - density <br>\n9 - pH <br>\n10 - sulphates <br>\n11 - alcohol <br>\nOutput variable (based on sensory data): <br>\n12 - quality (score between 0 and 10) <br>\n\n### Tips\nWhat might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'.\nThis allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value.\nWithout doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm)\n\n**KNIME** is a great tool (GUI) that can be used for this.<br>\n1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br>\n2- File Reader to 'Rule Engine Node' to turn the 10 point scale to dichtome variable (good wine and rest), the code to put in the rule engine is something like this:<br>\n - **$quality$ > 6.5 => \"good\"**<br>\n - **TRUE => \"bad\"** <br>\n3- Rule Engine Node output to input of Column Filter node to filter out your original 10point feature (this prevent leaking)<br>\n4- Column Filter Node output to input of Partitioning Node (your standard train/tes split, e.g. 75%/25%, choose 'random' or 'stratified')<br>\n5- Partitioning Node train data split output to input of Train data split to input Decision Tree Learner node and <br>\n6- Partitioning Node test data split output to input Decision Tree predictor Node<br>\n7- Decision Tree learner Node output to input Decision Tree Node input<br>\n8- Decision Tree output to input ROC Node.. 
(here you can evaluate your model base on AUC value)<br>\n\n\n### Inspiration\nUse machine learning to determine which physiochemical properties make a wine 'good'!\n\n\n\n### Acknowledgements\n\nThis dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. *(I am mistaken and the public license type disallowed me from doing so, I will take this down at first request. I am not the owner of this dataset.*\n\n**Please include this citation if you plan to use this database: \nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. \nModeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.**\n\n### Relevant publication\n\nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. \nIn Decision Support Systems, Elsevier, 47(4):547-553, 2009.", "VersionNotes": "Fixed csv format to use comma as delimiter", "TotalCompressedBytes": 100951.0, "TotalUncompressedBytes": 100951.0}]
|
[{"Id": 4458, "CreatorUserId": 1132983, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 8204.0, "CurrentDatasourceVersionId": 8204.0, "ForumId": 10170, "Type": 2, "CreationDate": "11/12/2017 14:08:43", "LastActivityDate": "02/06/2018", "TotalViews": 1214229, "TotalDownloads": 194418, "TotalVotes": 2537, "TotalKernels": 1574}]
| null |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from numpy.linalg import eig
# ### Loading the dataset
df = pd.read_csv("../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv")
df.head()
df.shape
# There are 1599 rows and 12 columns
df.dtypes
# ### Null Values
df.isnull().sum()
# There are no null values noted in the dataset
# ### Five-Point Summary
df.describe().T
sns.pairplot(data=df, diag_kind="kde")
plt.show()
# From the pair plot we can see that the numerical features on the diagonal are mostly right-skewed
plt.figure(figsize=(15, 7))
mask = np.triu(np.ones_like(df.corr()))
sns.heatmap(df.corr(), annot=True, mask=mask)
plt.show()
# Only a few variable pairs show high multicollinearity: density and fixed acidity (0.67), and citric acid and fixed acidity (0.67).
# ### Determining Outliers
for i in df:
sns.boxplot(df[i])
plt.show()
# From the box plots it can be seen that there are large outliers in multiple features such as residual sugar, chlorides, density, etc. We will use the IQR method to remove the outliers.
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
df = df[~((df < (Q1 - 3.5 * IQR)) | (df > (Q3 + 3.5 * IQR))).any(axis=1)]
df.shape
# After IQR technique the shape of the data has changed to 1452 rows and 12 columns
df.skew()
df.drop("quality", axis=1, inplace=True)
# ### Scaling the data
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
df_scaled = ss.fit_transform(df)
# Note: this wraps the original (unscaled) df, so the array returned by StandardScaler above is
# effectively discarded; to actually use the scaled values, wrap that array instead of df.
df_scaled = pd.DataFrame(df, columns=df.columns)
df_scaled.shape
# ### K-Means
wcss = []
for k in range(1, 10):
kmeans = KMeans(n_clusters=k)
kmeans.fit(df_scaled)
wcss.append(kmeans.inertia_)
print(wcss)
plt.figure(figsize=(12, 6))
plt.plot(range(1, 10), wcss)
plt.xlabel("Number of Clusters")
plt.title("Plot for Optimal Number of Clusters")
plt.ylabel("WCSS")
plt.show()
# From the above elbow plot we can see that the optimal value of k can be considered as 2.
KMeans3 = KMeans(n_clusters=2, random_state=10)
KMeans3.fit(df_scaled)
labels = KMeans3.predict(df_scaled)  # use the final fitted model, not the leftover k=9 model from the elbow loop
KMeans3.cluster_centers_
KMeans3.labels_
df_Kmeans = df_scaled.copy()
df_Kmeans["cluster"] = KMeans3.labels_
df_Kmeans["cluster"].value_counts()
sns.countplot(df_Kmeans["cluster"])
plt.title("Cluster Size")
plt.xlabel("Number of Clusters")
plt.ylabel("Number of Observations")
plt.show()
# There are 2 clusters formed and the segregation can be seen from the above plot
# ### Silhouette_score
from sklearn.metrics import silhouette_score
c = [2, 3, 4, 5, 6]
for i in c:
cluster = KMeans(n_clusters=i)
cluster.fit(df_scaled)
score = silhouette_score(df_scaled, cluster.labels_, random_state=10)
print("score", i, "", score)
from yellowbrick.cluster import SilhouetteVisualizer
c = [2, 3, 4, 5]
for i in c:
cluster = KMeans(n_clusters=i)
cluster.fit(df_scaled)
score = silhouette_score(df_scaled, cluster.labels_, random_state=10)
print("score", i, "", score)
Visualizer = SilhouetteVisualizer(cluster, colors="yellowbrick")
Visualizer.fit(df_scaled)
Visualizer.show()
# From silhouette_score method we can see that the silhouette_score is maximum for k=2 hence K=2 is considered.
# ### Agglomerative clustering
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
link_mat = linkage(df_scaled, method="ward")
link_mat
c, cd = cophenet(link_mat, pdist(df_scaled))
c
# The cophenetic coefficient measures how faithfully the dendrogram preserves the original pairwise distances; a value close to 1 indicates the hierarchical clustering is quite good.
dendrogram(link_mat)
plt.show()
# From the above dendrogram the optimal number of clusters is 2, since cutting the tree above a height of about 700 leaves two branches.
df_aggo = df_scaled.copy()
clusters = AgglomerativeClustering(n_clusters=2, linkage="ward")
clusters.fit(df_scaled)
df_aggo["cluster"] = clusters.labels_
df_aggo["cluster"].value_counts()
sns.countplot(df_aggo["cluster"])
plt.title("Cluster Size")
plt.xlabel("Number of Clusters")
plt.ylabel("Number of Observations")
plt.show()
# Agglomerative clustering also forms 2 clusters, and their segregation can be seen in the above plot.
# ## PCA
pca = PCA(n_components=0.95)
pca = pca.fit(df_scaled)
print("Eigen Vector :\n", pca.components_)
print()
print("Eigen Values :\n", pca.explained_variance_)
print()
print("Variance :\n", pca.explained_variance_ratio_)
# The highest variance is explained by PC1, about 94%.
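# A quick check of the cumulative variance captured by the retained components
# (a sketch using the pca object fitted above):
print(pca.explained_variance_ratio_.cumsum())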
df_pca = pd.DataFrame(pca.transform(df_scaled), columns=["PC1", "PC2"])
df_pca.head()
sns.heatmap(df_pca.corr(), annot=True)
# From the above heat map we can see that there is no correlation between the principal components after applying PCA
# ### Kmeans
df_kmeans = df_pca.copy()
df_kmeans["group"] = KMeans3.labels_
df_kmeans.head()
df_kmeans["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_kmeans, hue="group")
plt.show()
# ### AgglomerativeClustering
df_agg = df_pca.copy()
df_agg["group"] = clusters.labels_
df_agg.head()
df_agg["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_agg, hue="group")
plt.show()
| false | 0 | 1,836 | 2 | 2,705 | 1,836 |
||
129114128
|
data_path = "/kaggle/input/deepfake-detection-challenge/train_sample_videos"
import pandas as pd
data_df = pd.read_json(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/metadata.json"
)
data_df = data_df.T
data_df
df_real = data_df[data_df["label"] == "REAL"]
df_real
df_fake = data_df[data_df["label"] == "FAKE"]
df_fake
df = pd.concat([df_real.sample(n=30), df_fake.sample(n=30)])
df
from tensorflow import keras
model1 = keras.models.load_model("/kaggle/input/models/ncs/densenet.h5")
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import InputLayer
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
base_model2 = InceptionResNetV2(
include_top=False, weights="imagenet", input_shape=(128, 128, 3)
)
base_model2.trainable = True
model2 = Sequential()
model2.add(base_model2)
model2.add(GlobalAveragePooling2D())
model2.add(Dense(2, activation="softmax"))
model2.summary(expand_nested=False)
model2.load_weights("/kaggle/input/models/ncs/googlenet.h5")
model3 = keras.models.load_model("/kaggle/input/models/ncs/densenet_sc.h5")
model4 = keras.models.load_model("/kaggle/input/models/ncs/googlenet_sc.h5")
import gc
gc.collect()
gc.collect()
import dlib
import cv2
from tqdm import tqdm
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
detector = dlib.get_frontal_face_detector()
res = []
for x in tqdm(df.index):
file = df.loc[x]
cap = cv2.VideoCapture(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + x
)
    frameRate = cap.get(5)  # property 5 == cv2.CAP_PROP_FPS (frames per second)
print(x, file[0])
r1 = []
r2 = []
r3 = []
r4 = []
while cap.isOpened():
        frameId = cap.get(1)  # property 1 == cv2.CAP_PROP_POS_FRAMES (index of the next frame)
ret, frame = cap.read()
if ret != True:
break
if frameId % ((int(frameRate) + 1) * 1) == 0:
face_rects, scores, idx = detector.run(frame, 0)
for i, d in enumerate(face_rects):
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
crop_img = frame[y1:y2, x1:x2]
data = img_to_array(cv2.resize(crop_img, (128, 128))).flatten() / 255.0
data = data.reshape(-1, 128, 128, 3)
pred1 = model1.predict(data, verbose=0)
pred2 = model2.predict(data, verbose=0)
pred3 = model3.predict(data, verbose=0)
pred4 = model4.predict(data, verbose=0)
cl1 = np.argmax(pred1, axis=1)[0]
cl2 = np.argmax(pred2, axis=1)[0]
cl3 = np.argmax(pred3, axis=1)[0]
cl4 = np.argmax(pred4, axis=1)[0]
r1.append(cl1)
r2.append(cl2)
r3.append(cl3)
r4.append(cl4)
print(r1)
print(r2)
print(r3)
print(r4)
row = [x] + [file[0]]
if r1 != []:
if 0 in r1:
fc = r1.count(0)
else:
fc = 0
if 1 in r1:
rc = r1.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
if r2 != []:
if 0 in r2:
fc = r2.count(0)
else:
fc = 0
if 1 in r2:
rc = r2.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
if r3 != []:
if 0 in r3:
fc = r3.count(0)
else:
fc = 0
if 1 in r3:
rc = r3.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL")
else:
row += ["FAKE"]
print("FAKE")
if r4 != []:
if 0 in r4:
fc = r4.count(0)
else:
fc = 0
if 1 in r4:
rc = r4.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
res.append(row)
gc.collect()
print("--------------------------------------\n")
res
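# The four near-identical blocks inside the loop above each take a frame-level majority vote for one
# model. A compact helper with the same logic (a sketch; assumes class index 1 = REAL and 0 = FAKE, as above):
def majority_label(frame_preds):
    # frame_preds: list of per-frame class indices predicted by one model for one video
    return "REAL" if frame_preds.count(1) > frame_preds.count(0) else "FAKE"
# e.g. row = [x, file[0]] + [majority_label(r) for r in (r1, r2, r3, r4) if r]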
df_res = pd.DataFrame(
res, columns=["file", "true", "densenet", "googlenet", "densenet_s", "googlenet_s"]
)
df_res
df_res = df_res.dropna(subset=["densenet"])
df_res.to_csv("result.csv", index=True)
df_res["true"].value_counts()
from sklearn import metrics
print(
"DenseNet :",
metrics.accuracy_score(df_res["true"].values, df_res["densenet"].values),
)
print(
"GoogLeNet :",
metrics.accuracy_score(df_res["true"].values, df_res["googlenet"].values),
)
print(
"DenseNet S :",
metrics.accuracy_score(df_res["true"].values, df_res["densenet_s"].values),
)
print(
"GoogLeNet S :",
metrics.accuracy_score(df_res["true"].values, df_res["googlenet_s"].values),
)
print(
"DenseNet :",
metrics.recall_score(
df_res["true"].values, df_res["densenet"].values, pos_label="FAKE"
),
)
print(
"GoogLeNet :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet"].values, pos_label="FAKE"
),
)
print(
"DenseNet S :",
metrics.recall_score(
df_res["true"].values, df_res["densenet_s"].values, pos_label="FAKE"
),
)
print(
"GoogLeNet S :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet_s"].values, pos_label="FAKE"
),
)
print(
"DenseNet :",
metrics.recall_score(
df_res["true"].values, df_res["densenet"].values, pos_label="REAL"
),
)
print(
"GoogLeNet :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet"].values, pos_label="REAL"
),
)
print(
"DenseNet S :",
metrics.recall_score(
df_res["true"].values, df_res["densenet_s"].values, pos_label="REAL"
),
)
print(
"GoogLeNet S :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet_s"].values, pos_label="REAL"
),
)
from sklearn import metrics
print(
"DenseNet :\n",
metrics.classification_report(df_res["true"].values, df_res["densenet"].values),
"\n",
)
print(
"GoogLeNet :\n",
metrics.classification_report(df_res["true"].values, df_res["googlenet"].values),
"\n",
)
print(
"DenseNet S :\n",
metrics.classification_report(df_res["true"].values, df_res["densenet_s"].values),
"\n",
)
print(
"GoogLeNet S :\n",
metrics.classification_report(df_res["true"].values, df_res["googlenet_s"].values),
"\n",
)
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet_s"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet_s"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114128.ipynb
| null | null |
[{"Id": 129114128, "ScriptId": 38347181, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11268436, "CreationDate": "05/11/2023 05:42:54", "VersionNumber": 1.0, "Title": "test_dfdc", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 260.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
data_path = "/kaggle/input/deepfake-detection-challenge/train_sample_videos"
import pandas as pd
data_df = pd.read_json(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/metadata.json"
)
data_df = data_df.T
data_df
df_real = data_df[data_df["label"] == "REAL"]
df_real
df_fake = data_df[data_df["label"] == "FAKE"]
df_fake
df = pd.concat([df_real.sample(n=30), df_fake.sample(n=30)])
df
from tensorflow import keras
model1 = keras.models.load_model("/kaggle/input/models/ncs/densenet.h5")
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import InputLayer
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
base_model2 = InceptionResNetV2(
include_top=False, weights="imagenet", input_shape=(128, 128, 3)
)
base_model2.trainable = True
model2 = Sequential()
model2.add(base_model2)
model2.add(GlobalAveragePooling2D())
model2.add(Dense(2, activation="softmax"))
model2.summary(expand_nested=False)
model2.load_weights("/kaggle/input/models/ncs/googlenet.h5")
model3 = keras.models.load_model("/kaggle/input/models/ncs/densenet_sc.h5")
model4 = keras.models.load_model("/kaggle/input/models/ncs/googlenet_sc.h5")
import gc
gc.collect()
gc.collect()
import dlib
import cv2
from tqdm import tqdm
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
detector = dlib.get_frontal_face_detector()
res = []
for x in tqdm(df.index):
file = df.loc[x]
cap = cv2.VideoCapture(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + x
)
    frameRate = cap.get(5)  # property 5 == cv2.CAP_PROP_FPS (frames per second)
print(x, file[0])
r1 = []
r2 = []
r3 = []
r4 = []
while cap.isOpened():
        frameId = cap.get(1)  # property 1 == cv2.CAP_PROP_POS_FRAMES (index of the next frame)
ret, frame = cap.read()
if ret != True:
break
if frameId % ((int(frameRate) + 1) * 1) == 0:
face_rects, scores, idx = detector.run(frame, 0)
for i, d in enumerate(face_rects):
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
crop_img = frame[y1:y2, x1:x2]
data = img_to_array(cv2.resize(crop_img, (128, 128))).flatten() / 255.0
data = data.reshape(-1, 128, 128, 3)
pred1 = model1.predict(data, verbose=0)
pred2 = model2.predict(data, verbose=0)
pred3 = model3.predict(data, verbose=0)
pred4 = model4.predict(data, verbose=0)
cl1 = np.argmax(pred1, axis=1)[0]
cl2 = np.argmax(pred2, axis=1)[0]
cl3 = np.argmax(pred3, axis=1)[0]
cl4 = np.argmax(pred4, axis=1)[0]
r1.append(cl1)
r2.append(cl2)
r3.append(cl3)
r4.append(cl4)
print(r1)
print(r2)
print(r3)
print(r4)
row = [x] + [file[0]]
if r1 != []:
if 0 in r1:
fc = r1.count(0)
else:
fc = 0
if 1 in r1:
rc = r1.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
if r2 != []:
if 0 in r2:
fc = r2.count(0)
else:
fc = 0
if 1 in r2:
rc = r2.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
if r3 != []:
if 0 in r3:
fc = r3.count(0)
else:
fc = 0
if 1 in r3:
rc = r3.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL")
else:
row += ["FAKE"]
print("FAKE")
if r4 != []:
if 0 in r4:
fc = r4.count(0)
else:
fc = 0
if 1 in r4:
rc = r4.count(1)
else:
rc = 0
if rc > fc:
row += ["REAL"]
print("REAL", end="\t")
else:
row += ["FAKE"]
print("FAKE", end="\t")
res.append(row)
gc.collect()
print("--------------------------------------\n")
res
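# The four near-identical blocks inside the loop above each take a frame-level majority vote for one
# model. A compact helper with the same logic (a sketch; assumes class index 1 = REAL and 0 = FAKE, as above):
def majority_label(frame_preds):
    # frame_preds: list of per-frame class indices predicted by one model for one video
    return "REAL" if frame_preds.count(1) > frame_preds.count(0) else "FAKE"
# e.g. row = [x, file[0]] + [majority_label(r) for r in (r1, r2, r3, r4) if r]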
df_res = pd.DataFrame(
res, columns=["file", "true", "densenet", "googlenet", "densenet_s", "googlenet_s"]
)
df_res
df_res = df_res.dropna(subset=["densenet"])
df_res.to_csv("result.csv", index=True)
df_res["true"].value_counts()
from sklearn import metrics
print(
"DenseNet :",
metrics.accuracy_score(df_res["true"].values, df_res["densenet"].values),
)
print(
"GoogLeNet :",
metrics.accuracy_score(df_res["true"].values, df_res["googlenet"].values),
)
print(
"DenseNet S :",
metrics.accuracy_score(df_res["true"].values, df_res["densenet_s"].values),
)
print(
"GoogLeNet S :",
metrics.accuracy_score(df_res["true"].values, df_res["googlenet_s"].values),
)
print(
"DenseNet :",
metrics.recall_score(
df_res["true"].values, df_res["densenet"].values, pos_label="FAKE"
),
)
print(
"GoogLeNet :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet"].values, pos_label="FAKE"
),
)
print(
"DenseNet S :",
metrics.recall_score(
df_res["true"].values, df_res["densenet_s"].values, pos_label="FAKE"
),
)
print(
"GoogLeNet S :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet_s"].values, pos_label="FAKE"
),
)
print(
"DenseNet :",
metrics.recall_score(
df_res["true"].values, df_res["densenet"].values, pos_label="REAL"
),
)
print(
"GoogLeNet :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet"].values, pos_label="REAL"
),
)
print(
"DenseNet S :",
metrics.recall_score(
df_res["true"].values, df_res["densenet_s"].values, pos_label="REAL"
),
)
print(
"GoogLeNet S :",
metrics.recall_score(
df_res["true"].values, df_res["googlenet_s"].values, pos_label="REAL"
),
)
from sklearn import metrics
print(
"DenseNet :\n",
metrics.classification_report(df_res["true"].values, df_res["densenet"].values),
"\n",
)
print(
"GoogLeNet :\n",
metrics.classification_report(df_res["true"].values, df_res["googlenet"].values),
"\n",
)
print(
"DenseNet S :\n",
metrics.classification_report(df_res["true"].values, df_res["densenet_s"].values),
"\n",
)
print(
"GoogLeNet S :\n",
metrics.classification_report(df_res["true"].values, df_res["googlenet_s"].values),
"\n",
)
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet_s"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
import os
import matplotlib.pyplot as plt
cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet_s"].values)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=["FAKE", "REAL"]
)
fig, ax = plt.subplots(figsize=(10, 10))
cm_display.plot(ax=ax)
plt.show()
| false | 0 | 2,716 | 0 | 2,716 | 2,716 |
||
129114544
|
<jupyter_start><jupyter_text>IMDB Movies Dataset
### Context
IMDB Dataset of top 1000 movies and tv shows.
You can find the **EDA Process** on - https://www.kaggle.com/harshitshankhdhar/eda-on-imdb-movies-dataset
Please consider **UPVOTE** if you found it useful.
### Content
Data:-
- **Poster_Link** - Link of the poster that imdb using
- **Series_Title** = Name of the movie
- **Released_Year** - Year at which that movie released
- **Certificate** - Certificate earned by that movie
- **Runtime** - Total runtime of the movie
- **Genre** - Genre of the movie
- **IMDB_Rating** - Rating of the movie at IMDB site
- **Overview** - mini story/ summary
- **Meta_score** - Score earned by the movie
- **Director** - Name of the Director
- **Star1,Star2,Star3,Star4** - Name of the Stars
- **No_of_votes** - Total number of votes
- **Gross** - Money earned by that movie
### Inspiration
- Analysis of the gross of a movie vs directors.
- Analysis of the gross of a movie vs different - different stars.
- Analysis of the No_of_votes of a movie vs directors.
- Analysis of the No_of_votes of a movie vs different - different stars.
- Which actor prefer which Genre more?
- Which combination of actors are getting good IMDB_Rating maximum time?
- Which combination of actors are getting good gross?
Kaggle dataset identifier: imdb-dataset-of-top-1000-movies-and-tv-shows
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import Libraries
import pandas as pd
import numpy as np
# # Load Data
df = pd.read_csv(
"/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv"
)
# # Preprocess Data
# Grab necessary columns
columns = df.columns[
~df.columns.isin(["Poster_Link", "Released_Year", "Certificate", "Meta_score"])
]
df = df[columns]
# Transform 'Runtime' from object to int
df["Runtime"] = df["Runtime"].apply(lambda x: x.split(" ")[0])
df["IMDB_Rating"] = df["IMDB_Rating"].astype(str)
df["Genre"] = df["Genre"].str.lower()
# Rescale 'Gross' (divide by 1e8, i.e. hundreds of millions). Fill NaN values with the mean value
def transform_gross(gross):
if gross is np.nan:
return gross
if isinstance(gross, str):
gross = float(gross.replace(",", ""))
return gross / 10**8
df["Gross"] = df["Gross"].apply(transform_gross)
df["Gross"] = df["Gross"].fillna(df["Gross"].mean())
import nltk
nltk.download("stopwords")
from string import punctuation
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
punctuation = list(punctuation)
stopwords = stopwords.words("english")
def preprocess_text(sentence):
list_words = []
tokens = word_tokenize(sentence)
return " ".join(
[t.lower() for t in tokens if t not in stopwords and t not in punctuation]
)
df["Overview"] = df["Overview"].apply(preprocess_text)
# # Join text from columns that contain movie information
names = df.columns[~df.columns.isin(["No_of_Votes", "Gross"])]
df["text"] = df[names].agg(" ".join, axis=1)
df["text"] = df["text"] + " " + df["Director"] + " " + df["Director"]
# # Load pre-trained model Word2Vec
# - I have trained Word2Vec model from IMDB Data, but the result is not as good as the pre-trained model
from gensim.models import Word2Vec
vec_size = 300
df["tokenized_text"] = df["text"].apply(word_tokenize)
# # Initialize the Word2Vec model (without training)
# model = Word2Vec(vector_size=vec_size, window=10, min_count=1, workers=2)
# # Build the vocabulary
# model.build_vocab(df['tokenized_text'])
# # Train the model
# model.train(df['tokenized_text'], total_examples=model.corpus_count, epochs=30)
import gensim.downloader as api
model = api.load("word2vec-google-news-300")
def get_avg_feature_vec(sentence, vec_size, model, vocab):
vec = np.zeros(vec_size)
tt_word = 0
for w in sentence:
if w in vocab:
vec += model[w]
tt_word += 1
if tt_word:
vec /= tt_word
return vec
vocabulary = set(model.index_to_key)
feature_matrix = [
get_avg_feature_vec(s, vec_size, model, vocabulary) for s in df["tokenized_text"]
]
# # Get recommendation
# - Assume that I have watched 'The Dark Knight'
# - Use cosine-similarity to find related movies
movie_index = df[df["Series_Title"] == "The Dark Knight"].index[0]
movie_index
from sklearn.metrics.pairwise import cosine_similarity
# Compute the cosine similarities between the user movie and all other movies
user_movie_vector = feature_matrix[movie_index].reshape(1, -1)
similarity_scores = cosine_similarity(user_movie_vector, feature_matrix)
similar_movies = list(enumerate(similarity_scores[0]))
# Get the most similar movies (the top 19, skipping index 0, which is the query movie itself)
sorted_similar_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:20]
# Print the most similar movies
for i, score in sorted_similar_movies:
print("{}: {}".format(i, df.loc[i, "Series_Title"]))
# ## Apply a weighted rating to the recommendations; this takes vote counts into account and favours movies with higher ratings
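# The function below follows the well-known IMDB weighted-rating formula:
#   WR = v / (v + m) * R + m / (v + m) * C
# where v is a movie's vote count, m is the vote-count threshold (the 75th percentile here),
# R is the movie's IMDB rating and C is the mean rating over all movies.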
def cal_weighted_rating(movie_indices):
weighted_movie_indices = dict()
m = df["No_of_Votes"].quantile(0.75)
C = df["IMDB_Rating"].astype(float).mean()
for movie_index in movie_indices:
if df.loc[movie_index, "No_of_Votes"] > m:
v = df.loc[movie_index, "No_of_Votes"]
R = float(df.loc[movie_index, "IMDB_Rating"])
weighted_movie_indices[movie_index] = R * v / (v + m) + C * m / (v + m)
return sorted(weighted_movie_indices.items(), key=lambda x: x[1], reverse=True)
top_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:100]
indices = cal_weighted_rating([i[0] for i in top_movies])[0:20]
for i, score in indices:
print("{}: {}".format(i, df.loc[i, "Series_Title"]))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114544.ipynb
|
imdb-dataset-of-top-1000-movies-and-tv-shows
|
harshitshankhdhar
|
[{"Id": 129114544, "ScriptId": 36803103, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11527547, "CreationDate": "05/11/2023 05:47:27", "VersionNumber": 1.0, "Title": "Movie Recommender", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 152.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 184883315, "KernelVersionId": 129114544, "SourceDatasetVersionId": 1898721}]
|
[{"Id": 1898721, "DatasetId": 1131493, "DatasourceVersionId": 1937036, "CreatorUserId": 5138057, "LicenseName": "CC0: Public Domain", "CreationDate": "02/01/2021 07:35:48", "VersionNumber": 1.0, "Title": "IMDB Movies Dataset", "Slug": "imdb-dataset-of-top-1000-movies-and-tv-shows", "Subtitle": "Top 1000 Movies by IMDB Rating", "Description": "### Context\n\nIMDB Dataset of top 1000 movies and tv shows.\nYou can find the **EDA Process** on - https://www.kaggle.com/harshitshankhdhar/eda-on-imdb-movies-dataset\n\nPlease consider **UPVOTE** if you found it useful. \n\n### Content\n\nData:-\n- **Poster_Link** - Link of the poster that imdb using\n- **Series_Title** = Name of the movie \n- **Released_Year** - Year at which that movie released\n- **Certificate** - Certificate earned by that movie\n- **Runtime** - Total runtime of the movie\n- **Genre** - Genre of the movie\n- **IMDB_Rating** - Rating of the movie at IMDB site\n- **Overview** - mini story/ summary\n- **Meta_score** - Score earned by the movie\n- **Director** - Name of the Director\n- **Star1,Star2,Star3,Star4** - Name of the Stars\n- **No_of_votes** - Total number of votes\n- **Gross** - Money earned by that movie\n\n### Inspiration\n\n- Analysis of the gross of a movie vs directors.\n- Analysis of the gross of a movie vs different - different stars.\n- Analysis of the No_of_votes of a movie vs directors.\n- Analysis of the No_of_votes of a movie vs different - different stars.\n- Which actor prefer which Genre more?\n- Which combination of actors are getting good IMDB_Rating maximum time?\n- Which combination of actors are getting good gross?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1131493, "CreatorUserId": 5138057, "OwnerUserId": 5138057.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1898721.0, "CurrentDatasourceVersionId": 1937036.0, "ForumId": 1148924, "Type": 2, "CreationDate": "02/01/2021 07:35:48", "LastActivityDate": "02/01/2021", "TotalViews": 179357, "TotalDownloads": 32394, "TotalVotes": 329, "TotalKernels": 92}]
|
[{"Id": 5138057, "UserName": "harshitshankhdhar", "DisplayName": "Harshit Shankhdhar", "RegisterDate": "05/21/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import Libraries
import pandas as pd
import numpy as np
# # Load Data
df = pd.read_csv(
"/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv"
)
# # Preprocess Data
# Grab necessary columns
columns = df.columns[
~df.columns.isin(["Poster_Link", "Released_Year", "Certificate", "Meta_score"])
]
df = df[columns]
# Transform 'Runtime' from object to int
df["Runtime"] = df["Runtime"].apply(lambda x: x.split(" ")[0])
df["IMDB_Rating"] = df["IMDB_Rating"].astype(str)
df["Genre"] = df["Genre"].str.lower()
# Rescale 'Gross' (divide by 1e8, i.e. hundreds of millions). Fill NaN values with the mean value
def transform_gross(gross):
if gross is np.nan:
return gross
if isinstance(gross, str):
gross = float(gross.replace(",", ""))
return gross / 10**8
df["Gross"] = df["Gross"].apply(transform_gross)
df["Gross"] = df["Gross"].fillna(df["Gross"].mean())
import nltk
nltk.download("stopwords")
from string import punctuation
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
punctuation = list(punctuation)
stopwords = stopwords.words("english")
def preprocess_text(sentence):
list_words = []
tokens = word_tokenize(sentence)
return " ".join(
[t.lower() for t in tokens if t not in stopwords and t not in punctuation]
)
df["Overview"] = df["Overview"].apply(preprocess_text)
# # Join text from columns that contain movie information
names = df.columns[~df.columns.isin(["No_of_Votes", "Gross"])]
df["text"] = df[names].agg(" ".join, axis=1)
df["text"] = df["text"] + " " + df["Director"] + " " + df["Director"]
# # Load pre-trained model Word2Vec
# - I have trained Word2Vec model from IMDB Data, but the result is not as good as the pre-trained model
from gensim.models import Word2Vec
vec_size = 300
df["tokenized_text"] = df["text"].apply(word_tokenize)
# # Initialize the Word2Vec model (without training)
# model = Word2Vec(vector_size=vec_size, window=10, min_count=1, workers=2)
# # Build the vocabulary
# model.build_vocab(df['tokenized_text'])
# # Train the model
# model.train(df['tokenized_text'], total_examples=model.corpus_count, epochs=30)
import gensim.downloader as api
model = api.load("word2vec-google-news-300")
def get_avg_feature_vec(sentence, vec_size, model, vocab):
vec = np.zeros(vec_size)
tt_word = 0
for w in sentence:
if w in vocab:
vec += model[w]
tt_word += 1
if tt_word:
vec /= tt_word
return vec
vocabulary = set(model.index_to_key)
feature_matrix = [
get_avg_feature_vec(s, vec_size, model, vocabulary) for s in df["tokenized_text"]
]
# # Get recommendation
# - Assume that I have watched 'The Dark Knight'
# - Use cosine-similarity to find related movies
movie_index = df[df["Series_Title"] == "The Dark Knight"].index[0]
movie_index
from sklearn.metrics.pairwise import cosine_similarity
# Compute the cosine similarities between the user movie and all other movies
user_movie_vector = feature_matrix[movie_index].reshape(1, -1)
similarity_scores = cosine_similarity(user_movie_vector, feature_matrix)
similar_movies = list(enumerate(similarity_scores[0]))
# Get the most similar movies (the top 19, skipping index 0, which is the query movie itself)
sorted_similar_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:20]
# Print the most similar movies
for i, score in sorted_similar_movies:
print("{}: {}".format(i, df.loc[i, "Series_Title"]))
# ## Apply a weighted rating to the recommendations; this takes vote counts into account and favours movies with higher ratings
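# The function below follows the well-known IMDB weighted-rating formula:
#   WR = v / (v + m) * R + m / (v + m) * C
# where v is a movie's vote count, m is the vote-count threshold (the 75th percentile here),
# R is the movie's IMDB rating and C is the mean rating over all movies.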
def cal_weighted_rating(movie_indices):
weighted_movie_indices = dict()
m = df["No_of_Votes"].quantile(0.75)
C = df["IMDB_Rating"].astype(float).mean()
for movie_index in movie_indices:
if df.loc[movie_index, "No_of_Votes"] > m:
v = df.loc[movie_index, "No_of_Votes"]
R = float(df.loc[movie_index, "IMDB_Rating"])
weighted_movie_indices[movie_index] = R * v / (v + m) + C * m / (v + m)
return sorted(weighted_movie_indices.items(), key=lambda x: x[1], reverse=True)
top_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:100]
indices = cal_weighted_rating([i[0] for i in top_movies])[0:20]
for i, score in indices:
print("{}: {}".format(i, df.loc[i, "Series_Title"]))
| false | 1 | 1,552 | 2 | 1,965 | 1,552 |
||
129080229
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script># # VG-stats
# ## Abdulkareem Abunabhan
# ### 10/5/2023
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
# #### Q1-Which company is the most common video game publisher?
publisher_counts = df.groupby("Publisher")["Rank"].count()
most_common_publisher = publisher_counts.idxmax()
print("The most common video game publisher is:", most_common_publisher)
# #### Q2-What’s the most common platform?
platform_counts = df.groupby("Platform")["Rank"].count()
most_common_platform = platform_counts.idxmax()
print("The most common video game platform is", most_common_platform)
# #### Q3-What about the most common genre?
genre_counts = df.groupby("Genre")["Rank"].count()
most_common_genre = genre_counts.idxmax()
print("The most common video game genre is", most_common_genre)
# #### Q4-What are the top 20 highest grossing games?
#
df["Total_sales"] = df["NA_Sales"] + df["EU_Sales"] + df["JP_Sales"] + df["Other_Sales"]
df = df.sort_values("Total_sales", ascending=False)
top_20_games = df.head(20)
print(top_20_games[["Name", "Platform", "Publisher", "Total_sales"]])
# #### Q5-For North American video game sales, what’s the median?
#
na_median = df["NA_Sales"].median()
print("na_median is:", na_median)
# ##### Provide a secondary output showing ten games surrounding the median sales output.
# ##### Assume that games with same median value are sorted in descending order.
surrounding_games = df.loc[
(df["NA_Sales"] >= na_median - 0.05) & (df["NA_Sales"] <= na_median + 0.05)
]
surrounding_games = surrounding_games.sort_values(by="NA_Sales", ascending=False)
print("The ten games surrounding the median sales are:")
print(surrounding_games[["Name", "Platform", "Publisher", "NA_Sales"]].head(10))
# #### Q6-For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
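# This question boils down to a z-score: z = (x - mean) / std. A rough sketch using
# the rounded NA_Sales summary statistics of this dataset (mean ~0.26, std ~0.82,
# Wii Sports ~41.49); the cell below recomputes it exactly from the data:
_z_sketch = (41.49 - 0.26) / 0.82
# _z_sketch is roughly 50, i.e. about 50 standard deviations above the mean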
na_mean = df["NA_Sales"].mean()
na_std = df["NA_Sales"].std()
top_game = df.loc[df["Rank"] == 1]
deviations = (top_game["NA_Sales"] - na_mean) / na_std
print(deviations)
# #### Q7-The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?
platform_sales = df.groupby("Platform")["Total_sales"].mean()
wii_sales = platform_sales.loc["Wii"]
other_sales = platform_sales.loc[platform_sales.index != "Wii"].mean()
if wii_sales > other_sales:
print(
"The Nintendo Wii has a higher average number of sales than all other platforms."
)
else:
print(
"The Nintendo Wii has a lower average number of sales than all other platforms."
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080229.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129080229, "ScriptId": 38370622, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15002387, "CreationDate": "05/10/2023 20:49:21", "VersionNumber": 1.0, "Title": "Vg-stats", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 89.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184816925, "KernelVersionId": 129080229, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
# # VG-stats
# ## Abdulkareem Abunabhan
# ### 10/5/2023
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
# #### Q1-Which company is the most common video game publisher?
publisher_counts = df.groupby("Publisher")["Rank"].count()
most_common_publisher = publisher_counts.idxmax()
print("The most common video game publisher is:", most_common_publisher)
# #### Q2-What’s the most common platform?
platform_counts = df.groupby("Platform")["Rank"].count()
most_common_platform = platform_counts.idxmax()
print("The most common video game platform is", most_common_platform)
# #### Q3-What about the most common genre?
genre_counts = df.groupby("Genre")["Rank"].count()
most_common_genre = genre_counts.idxmax()
print("The most common video game genre is", most_common_genre)
# #### Q4-What are the top 20 highest grossing games?
#
df["Total_sales"] = df["NA_Sales"] + df["EU_Sales"] + df["JP_Sales"] + df["Other_Sales"]
df = df.sort_values("Total_sales", ascending=False)
top_20_games = df.head(20)
print(top_20_games[["Name", "Platform", "Publisher", "Total_sales"]])
# #### Q5-For North American video game sales, what’s the median?
#
na_median = df["NA_Sales"].median()
print("na_median is:", na_median)
# ##### Provide a secondary output showing ten games surrounding the median sales output.
# ##### Assume that games with same median value are sorted in descending order.
surrounding_games = df.loc[
(df["NA_Sales"] >= na_median - 0.05) & (df["NA_Sales"] <= na_median + 0.05)
]
surrounding_games = surrounding_games.sort_values(by="NA_Sales", ascending=False)
print("The ten games surrounding the median sales are:")
print(surrounding_games[["Name", "Platform", "Publisher", "NA_Sales"]].head(10))
# #### Q6-For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America?
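# This question boils down to a z-score: z = (x - mean) / std. A rough sketch using
# the rounded NA_Sales summary statistics of this dataset (mean ~0.26, std ~0.82,
# Wii Sports ~41.49); the cell below recomputes it exactly from the data:
_z_sketch = (41.49 - 0.26) / 0.82
# _z_sketch is roughly 50, i.e. about 50 standard deviations above the mean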
na_mean = df["NA_Sales"].mean()
na_std = df["NA_Sales"].std()
top_game = df.loc[df["Rank"] == 1]
deviations = (top_game["NA_Sales"] - na_mean) / na_std
print(deviations)
# #### Q7-The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms?
platform_sales = df.groupby("Platform")["Total_sales"].mean()
wii_sales = platform_sales.loc["Wii"]
other_sales = platform_sales.loc[platform_sales.index != "Wii"].mean()
if wii_sales > other_sales:
print(
"The Nintendo Wii has a higher average number of sales than all other platforms."
)
else:
print(
"The Nintendo Wii has a lower average number of sales than all other platforms."
)
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 996 | 0 | 2,109 | 996 |
129080111
|
# # Import
import pandas as pd
import numpy as np
import torch
import torchaudio
import math, random
from IPython.display import Audio
import librosa
from tqdm import tqdm
import warnings
from torch.utils.data import Dataset, DataLoader
import os
import matplotlib.pyplot as plt
from torchvision.transforms import Resize
from sklearn.model_selection import train_test_split
import torchvision.models as models
import torch.nn as nn
from timeit import default_timer as timer
from sklearn import preprocessing
from sklearn.metrics import (
f1_score,
recall_score,
confusion_matrix,
classification_report,
)
# disable all warning messages
warnings.filterwarnings("ignore")
train_on_gpu = True
def seed_everything(seed: int):
import random, os
import numpy as np
import torch
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
seed_everything(42)
config = {
"sample_rate": 22050,
"clip_length": 5,
"classes_number": 264,
"learning_rate": 0.001,
"batch_size": 32,
"num_epochs": 10,
}
# # Create Train DataFrame
# Because the audio files do not all have the same length, we first build a dataframe with the duration of each recording,
# and then build a dataframe with the start and end time of each fixed-length clip, which is the input to the training pipeline
# Original_Df = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
# for i, audio_name in tqdm(enumerate(Original_Df.filename)):
# audio_data, Audio_sample_rate = librosa.load(f"/kaggle/input/birdclef-2023/train_audio/{audio_name}")
# audio_duration = librosa.get_duration(y=audio_data, sr=Audio_sample_rate)
# Original_Df.loc[i, "duration"] = audio_duration
# Original_Df.to_csv("train_metadata_with_length.csv", index =False)
# ## Split each audio file into equal-length clips (DataFrame)
Df_length = pd.read_csv("/kaggle/working/train_metadata_with_length.csv")
def Create_from_end(Df_length, clip_length=5, overlap_length=0):
# Create New DataFrame
New_columns = np.append(Df_length.columns.values, ["start", "end"])
New_df = pd.DataFrame(columns=New_columns)
    # iterate over the audio files
for i in tqdm(range(len(Df_length)), total=len(Df_length)):
row = Df_length.iloc[i]
audio_length = original_length = row.duration # Audio Duration
start = 0
end = min(clip_length, audio_length)
while audio_length > 1:
# Append Start and the End to the DataFrame
new_row = pd.concat([row, pd.Series([start, end], index=["start", "end"])])
New_df = New_df.append(new_row, ignore_index=True)
# Update the start and the end
start = clip_length + (start - overlap_length)
end = min(clip_length + start, original_length)
# Decrease the length of the audio
audio_length = audio_length - (clip_length - overlap_length)
return New_df
# train_df = Create_from_end(Df_length,clip_length=5, overlap_length =2)
# train_df.to_csv("train_df_5s.csv", index=False)
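# Worked example of the windowing above (numbers are illustrative assumptions):
# a 12-second recording with clip_length=5 and overlap_length=2 advances by
# 5 - 2 = 3 seconds per step, producing the windows (0, 5), (3, 8), (6, 11), (9, 12).
_w_start, _w_end, _remaining = 0, 5.0, 12.0
_windows = []
while _remaining > 1:
    _windows.append((_w_start, _w_end))
    _w_start = 5 + (_w_start - 2)
    _w_end = min(5.0 + _w_start, 12.0)
    _remaining -= 5 - 2
# _windows now holds the four (start, end) pairs listed above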
# train_df  # only defined when the cell above is uncommented; the saved CSV is read back in below
DataSet = pd.read_csv("/kaggle/working/train_df_5s.csv")
X_train, X_valid, _, _ = train_test_split(
DataSet,
DataSet["primary_label"],
shuffle=True,
test_size=0.1,
random_state=42,
stratify=DataSet["primary_label"],
)
le = preprocessing.LabelEncoder()
le.fit(X_train["primary_label"])
train_label = le.transform(X_train["primary_label"])
valid_label = le.transform(X_valid["primary_label"])
X_train["label"] = train_label
X_valid["label"] = valid_label
X_train.reset_index(inplace=True)
X_valid.reset_index(inplace=True)
X_train.to_csv("Splited_train.csv", index=False)
X_valid.to_csv("Splited_valid.csv", index=False)
# # Data loader
class Bird_Dataset(Dataset):
def __init__(
self,
csv_file,
root_dir,
mode="train",
duration_sec=5,
transform=None,
transform_Aug=None,
):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.Bird_audios = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
self.transform_Aug = transform_Aug
self.duration_sec = duration_sec
self.mode = mode
def __len__(self):
return len(self.Bird_audios)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
audio_path = os.path.join(
self.root_dir, str(self.Bird_audios.loc[idx, "filename"])
)
# Load the audio signal
waveform, sample_rate = torchaudio.load(audio_path)
# Resample the Audio
waveform = torchaudio.functional.resample(
waveform, orig_freq=sample_rate, new_freq=22050
)
# Clip the audio
start_sample = int(self.Bird_audios.loc[idx, "start"] * config["sample_rate"])
end_sample = int(self.Bird_audios.loc[idx, "end"] * config["sample_rate"])
waveform = waveform[:, start_sample:end_sample]
        # # Pad the waveform if the clip is shorter than duration_sec
# target_frames = int(self.duration_sec * config["sample_rate"])
# pad_transform = torchaudio.transforms.PadTrim(target_frames)
# waveform = pad_transform(waveform)
# Compute the spectrogram
spec_transform = torchaudio.transforms.MelSpectrogram(
n_fft=800, hop_length=320, n_mels=128
)
specgram = spec_transform(waveform)
specgram = torchaudio.transforms.AmplitudeToDB()(specgram)
resize_transform = Resize((128, 224))
specgram = resize_transform(specgram)
# # Define the learnable parameter alpha
# alpha = torch.nn.Parameter(torch.tensor([1.0]))
# # Apply exponential transformation with alpha
# exp_specgram = torch.exp(alpha * specgram)
# # If alpha is a tensor, apply different values for each mel band
# if alpha.dim() == 1:
# exp_specgram = exp_specgram * alpha.view(1, -1, 1)
specgram = torch.cat([specgram, specgram, specgram], dim=0)
label = self.Bird_audios.loc[idx, "label"]
return (specgram, label) if self.mode == "train" else specgram
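# Note on the torch.cat above: the ImageNet-pretrained backbone used later (resnet50)
# expects a 3-channel input, so the single-channel mel spectrogram is simply repeated
# three times. A minimal shape sketch (illustrative only, independent of the class):
_mono = torch.zeros(1, 128, 224)  # (channels, n_mels, time frames after Resize)
_three_channel = torch.cat([_mono, _mono, _mono], dim=0)
assert _three_channel.shape == (3, 128, 224)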
train_Dataset = Bird_Dataset(
csv_file="/kaggle/working/Splited_train.csv",
root_dir="/kaggle/input/birdclef-2023/train_audio",
)
valid_Dataset = Bird_Dataset(
csv_file="/kaggle/working/Splited_train.csv",
root_dir="/kaggle/input/birdclef-2023/train_audio",
)
train_loader = torch.utils.data.DataLoader(
train_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True
)
valid_loader = torch.utils.data.DataLoader(
valid_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True
)
# show item from dataset
x = None
for image, label in train_Dataset:
# specgram_db = torchaudio.transforms.AmplitudeToDB()(image)
print(image.shape)
# plot the spectrogram
plt.imshow(image[0, :, :].numpy())
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.show()
break
model_Type = "resnet50"
feature = "baseline_model"
rd = np.random.randint(100000)
modelName = f"{model_Type}_{feature}_{rd}"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_pretrained_model(model_name):
if model_name == "ViT":
model = torch.hub.load(
"facebookresearch/deit:main", "deit_tiny_patch16_224", pretrained=True
)
for param in model.parameters(): # freeze model
param.requires_grad = False
n_inputs = model.head.in_features
model.head = nn.Sequential(
nn.Linear(n_inputs, 512), nn.ReLU(), nn.Dropout(0.3), nn.Linear(512, 7)
)
if model_name == "MaxViT":
model = timm.create_model("maxvit_tiny_rw_224", pretrained=False, img_size=224)
model.head.fc = nn.Linear(512, 7, bias=True)
if model_name == "vgg16":
model = models.vgg16(pretrained=True)
n_inputs = model.classifier[6].in_features
model.classifier[6] = nn.Sequential(
nn.Linear(n_inputs, 2048),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(2048, 4),
nn.LogSoftmax(dim=1),
)
if model_name == "resnet50":
model = models.resnet50(pretrained=True)
n_inputs = model.fc.in_features
model.fc = nn.Sequential(
nn.Linear(n_inputs, config["classes_number"]),
nn.LogSoftmax(dim=1),
)
if model_name == "alexnet":
model = models.AlexNet()
model.classifier[-1] = nn.Sequential(nn.Linear(4096, 7), nn.LogSoftmax(dim=1))
# Move to gpu and parallelize
model = model.to(device)
return model
model = get_pretrained_model(model_Type)
def train(
model,
criterion,
optimizer,
train_loader,
valid_loader,
save_file_name,
max_epochs_stop=3,
n_epochs=20,
print_every=1,
):
"""Train a PyTorch Model
Params
--------
model (PyTorch model): cnn to train
criterion (PyTorch loss): objective to minimize
    optimizer (PyTorch optimizer): optimizer to compute gradients of model parameters
train_loader (PyTorch dataloader): training dataloader to iterate through
valid_loader (PyTorch dataloader): validation dataloader used for early stopping
save_file_name (str ending in '.pt'): file path to save the model state dict
max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping
n_epochs (int): maximum number of training epochs
print_every (int): frequency of epochs to print training stats
Returns
--------
model (PyTorch model): trained cnn with best weights
history (DataFrame): history of train and validation loss and accuracy
"""
    # Early stopping initialization
epochs_no_improve = 0
valid_loss_min = np.Inf
valid_acc_max = 0
valid_f1_max = 0
valid_max_acc = 0
history = []
# Number of epochs already trained (if using loaded in model weights)
try:
print(f"Model has been trained for: {model.epochs} epochs.\n")
except:
model.epochs = 0
print(f"Starting Training from Scratch.\n")
overall_start = timer()
# Main loop
for epoch in range(n_epochs):
# keep track of training and validation loss each epoch
train_loss = 0.0
valid_loss = 0.0
train_acc = 0
valid_acc = 0
Train_f1_sc = 0
Valid_f1_sc = 0
# Set to training
model.train()
start = timer()
# Training loop
for ii, (data, target) in enumerate(train_loader):
# Tensors to gpu
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# Clear gradients
optimizer.zero_grad()
# Predicted outputs are log probabilities
output = model(data) # .sigmoid()
# target = target.unsqueeze(1)
# Loss and backpropagation of gradients
loss = criterion(output, target)
loss.backward()
# Update the parameters
optimizer.step()
# Track train loss by multiplying average loss by number of examples in batch
train_loss += loss.item() * data.size(0)
# Calculate accuracy by finding max log probability
_, pred = torch.max(output, dim=1)
# pred = torch.ge(output, 0.35)
correct_tensor = pred.eq(target.data.view_as(pred))
# Need to convert correct tensor from int to float to average
accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
# Multiply average accuracy times the number of examples in batch
Train_f1_sc = f1_score(target.cpu().data, pred.cpu(), average="macro")
train_acc += accuracy.item() * data.size(0)
# Track training progress
print(
f"Epoch: {epoch}\t{100 * (ii + 1) / len(train_loader):.2f}% complete. {timer() - start:.2f} seconds elapsed in epoch.",
end="\r",
)
# After training loops ends, start validation
else:
model.epochs += 1
# Don't need to keep track of gradients
with torch.no_grad():
# Set to evaluation mode
model.eval()
# Validation loop
for data, target in valid_loader:
# Tensors to gpu
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# Forward pass
# output = model(data)
# Validation loss
output = model(data) # .sigmoid()
# target = target.unsqueeze(1)
# Loss and backpropagation of gradients
loss = criterion(output, target)
# Multiply average loss times the number of examples in batch
valid_loss += loss.item() * data.size(0)
# Calculate validation accuracy
_, pred = torch.max(output, dim=1)
# pred = torch.ge(output, 0.35)
correct_tensor = pred.eq(target.data.view_as(pred))
accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
# Multiply average accuracy times the number of examples
valid_acc += accuracy.item() * data.size(0)
Valid_f1_sc = f1_score(
target.cpu().data, pred.cpu(), average="macro"
)
# Calculate average losses
train_loss = train_loss / len(train_loader.dataset)
valid_loss = valid_loss / len(valid_loader.dataset)
# Calculate average accuracy
train_acc = train_acc / len(train_loader.dataset)
valid_acc = valid_acc / len(valid_loader.dataset)
history.append(
[
train_loss,
valid_loss,
train_acc,
valid_acc,
Train_f1_sc,
Valid_f1_sc,
]
)
# Print training and validation results
if (epoch + 1) % print_every == 0:
print(
f"\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}"
)
print(
f"\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Train F1 score: {100 * Train_f1_sc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%\t Validation F1 score: {100 * Valid_f1_sc:.2f}%"
)
# Save the model if validation loss decreases
if valid_loss < valid_loss_min:
# if Valid_f1_sc > valid_f1_max:
# Save model
torch.save(model.state_dict(), save_file_name)
# Track improvement
epochs_no_improve = 0
valid_loss_min = valid_loss
valid_acc_max = valid_acc
valid_f1_max = Valid_f1_sc
# valid_best_acc = valid_acc
best_epoch = epoch
# Otherwise increment count of epochs with no improvement
elif valid_loss >= valid_loss_min:
epochs_no_improve += 1
# Trigger early stopping
if epochs_no_improve >= max_epochs_stop:
print(
f"\nEarly Stopping! Total epochs: {epoch}. Best epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc_max:.2f}%"
)
total_time = timer() - overall_start
print(
f"{total_time:.2f} total seconds elapsed. {total_time / (epoch+1):.2f} seconds per epoch."
)
# Load the best state dict
model.load_state_dict(torch.load(save_file_name))
# Attach the optimizer
model.optimizer = optimizer
# Format history
history = pd.DataFrame(
history,
columns=[
"train_loss",
"valid_loss",
"train_acc",
"valid_acc",
"train_f1",
"valid_f1",
],
)
return model, history
# Attach the optimizer
model.optimizer = optimizer
# Record overall time and print out stats
total_time = timer() - overall_start
print(
f"\nBest epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc:.2f}%"
)
print(
f"{total_time:.2f} total seconds elapsed. {total_time / (epoch):.2f} seconds per epoch."
)
# Format history
history = pd.DataFrame(
history,
columns=[
"train_loss",
"valid_loss",
"train_acc",
"valid_acc",
"train_f1",
"valid_f1",
],
)
return model, history
# criterion = nn.CrossEntropyLoss(weight = torch.FloatTensor(class_w).to(device))
criterion = nn.CrossEntropyLoss()
# criterion =torch.nn.BCEWithLogitsLoss(pos_weight = torch.tensor(weight_for_1).to(device))
# criterion =torch.nn.BCEWithLogitsLoss()
# criterion = LabelSmoothingCrossEntropy(weight = class_w)
criterion = criterion.to("cuda")
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
# optimizer = optim.Adam(model.parameters(), lr=0.003)
# exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.97)
model, history = train(
model,
criterion,
optimizer,
train_loader,
valid_loader,
save_file_name=f"{modelName}.pt",
max_epochs_stop=4,
n_epochs=10,
print_every=1,
)
device
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080111.ipynb
| null | null |
[{"Id": 129080111, "ScriptId": 38204879, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13581918, "CreationDate": "05/10/2023 20:47:34", "VersionNumber": 1.0, "Title": "Bird_Classifiaction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 546.0, "LinesInsertedFromPrevious": 546.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Import
import pandas as pd
import numpy as np
import torch
import torchaudio
import math, random
from IPython.display import Audio
import librosa
from tqdm import tqdm
import warnings
from torch.utils.data import Dataset, DataLoader
import os
import matplotlib.pyplot as plt
from torchvision.transforms import Resize
from sklearn.model_selection import train_test_split
import torchvision.models as models
import torch.nn as nn
from timeit import default_timer as timer
from sklearn import preprocessing
from sklearn.metrics import (
f1_score,
recall_score,
confusion_matrix,
classification_report,
)
# disable all warning messages
warnings.filterwarnings("ignore")
train_on_gpu = True
def seed_everything(seed: int):
import random, os
import numpy as np
import torch
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
seed_everything(42)
config = {
"sample_rate": 22050,
"clip_length": 5,
"classes_number": 264,
"learning_rate": 0.001,
"batch_size": 32,
"num_epochs": 10,
}
# # Create Train DataFrame
# Because the audio files do not all have the same length, we first build a dataframe with the duration of each recording,
# and then build a dataframe with the start and end time of each fixed-length clip, which is the input to the training pipeline
# Original_Df = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
# for i, audio_name in tqdm(enumerate(Original_Df.filename)):
# audio_data, Audio_sample_rate = librosa.load(f"/kaggle/input/birdclef-2023/train_audio/{audio_name}")
# audio_duration = librosa.get_duration(y=audio_data, sr=Audio_sample_rate)
# Original_Df.loc[i, "duration"] = audio_duration
# Original_Df.to_csv("train_metadata_with_length.csv", index =False)
# ## Split each audio file into equal-length clips (DataFrame)
Df_length = pd.read_csv("/kaggle/working/train_metadata_with_length.csv")
def Create_from_end(Df_length, clip_length=5, overlap_length=0):
# Create New DataFrame
New_columns = np.append(Df_length.columns.values, ["start", "end"])
New_df = pd.DataFrame(columns=New_columns)
    # iterate over the audio files
for i in tqdm(range(len(Df_length)), total=len(Df_length)):
row = Df_length.iloc[i]
audio_length = original_length = row.duration # Audio Duration
start = 0
end = min(clip_length, audio_length)
while audio_length > 1:
# Append Start and the End to the DataFrame
new_row = pd.concat([row, pd.Series([start, end], index=["start", "end"])])
New_df = New_df.append(new_row, ignore_index=True)
# Update the start and the end
start = clip_length + (start - overlap_length)
end = min(clip_length + start, original_length)
# Decrease the length of the audio
audio_length = audio_length - (clip_length - overlap_length)
return New_df
# train_df = Create_from_end(Df_length,clip_length=5, overlap_length =2)
# train_df.to_csv("train_df_5s.csv", index=False)
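# Worked example of the windowing above (numbers are illustrative assumptions):
# a 12-second recording with clip_length=5 and overlap_length=2 advances by
# 5 - 2 = 3 seconds per step, producing the windows (0, 5), (3, 8), (6, 11), (9, 12).
_w_start, _w_end, _remaining = 0, 5.0, 12.0
_windows = []
while _remaining > 1:
    _windows.append((_w_start, _w_end))
    _w_start = 5 + (_w_start - 2)
    _w_end = min(5.0 + _w_start, 12.0)
    _remaining -= 5 - 2
# _windows now holds the four (start, end) pairs listed above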
# train_df  # only defined when the cell above is uncommented; the saved CSV is read back in below
DataSet = pd.read_csv("/kaggle/working/train_df_5s.csv")
X_train, X_valid, _, _ = train_test_split(
DataSet,
DataSet["primary_label"],
shuffle=True,
test_size=0.1,
random_state=42,
stratify=DataSet["primary_label"],
)
le = preprocessing.LabelEncoder()
le.fit(X_train["primary_label"])
train_label = le.transform(X_train["primary_label"])
valid_label = le.transform(X_valid["primary_label"])
X_train["label"] = train_label
X_valid["label"] = valid_label
X_train.reset_index(inplace=True)
X_valid.reset_index(inplace=True)
X_train.to_csv("Splited_train.csv", index=False)
X_valid.to_csv("Splited_valid.csv", index=False)
# # Data loader
class Bird_Dataset(Dataset):
def __init__(
self,
csv_file,
root_dir,
mode="train",
duration_sec=5,
transform=None,
transform_Aug=None,
):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.Bird_audios = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
self.transform_Aug = transform_Aug
self.duration_sec = duration_sec
self.mode = mode
def __len__(self):
return len(self.Bird_audios)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
audio_path = os.path.join(
self.root_dir, str(self.Bird_audios.loc[idx, "filename"])
)
# Load the audio signal
waveform, sample_rate = torchaudio.load(audio_path)
# Resample the Audio
waveform = torchaudio.functional.resample(
waveform, orig_freq=sample_rate, new_freq=22050
)
# Clip the audio
start_sample = int(self.Bird_audios.loc[idx, "start"] * config["sample_rate"])
end_sample = int(self.Bird_audios.loc[idx, "end"] * config["sample_rate"])
waveform = waveform[:, start_sample:end_sample]
        # # Pad the waveform if the clip is shorter than duration_sec
# target_frames = int(self.duration_sec * config["sample_rate"])
# pad_transform = torchaudio.transforms.PadTrim(target_frames)
# waveform = pad_transform(waveform)
# Compute the spectrogram
spec_transform = torchaudio.transforms.MelSpectrogram(
n_fft=800, hop_length=320, n_mels=128
)
specgram = spec_transform(waveform)
specgram = torchaudio.transforms.AmplitudeToDB()(specgram)
resize_transform = Resize((128, 224))
specgram = resize_transform(specgram)
# # Define the learnable parameter alpha
# alpha = torch.nn.Parameter(torch.tensor([1.0]))
# # Apply exponential transformation with alpha
# exp_specgram = torch.exp(alpha * specgram)
# # If alpha is a tensor, apply different values for each mel band
# if alpha.dim() == 1:
# exp_specgram = exp_specgram * alpha.view(1, -1, 1)
specgram = torch.cat([specgram, specgram, specgram], dim=0)
label = self.Bird_audios.loc[idx, "label"]
return (specgram, label) if self.mode == "train" else specgram
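# Note on the torch.cat above: the ImageNet-pretrained backbone used later (resnet50)
# expects a 3-channel input, so the single-channel mel spectrogram is simply repeated
# three times. A minimal shape sketch (illustrative only, independent of the class):
_mono = torch.zeros(1, 128, 224)  # (channels, n_mels, time frames after Resize)
_three_channel = torch.cat([_mono, _mono, _mono], dim=0)
assert _three_channel.shape == (3, 128, 224)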
train_Dataset = Bird_Dataset(
csv_file="/kaggle/working/Splited_train.csv",
root_dir="/kaggle/input/birdclef-2023/train_audio",
)
valid_Dataset = Bird_Dataset(
csv_file="/kaggle/working/Splited_train.csv",
root_dir="/kaggle/input/birdclef-2023/train_audio",
)
train_loader = torch.utils.data.DataLoader(
train_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True
)
valid_loader = torch.utils.data.DataLoader(
valid_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True
)
# show item from dataset
x = None
for image, label in train_Dataset:
# specgram_db = torchaudio.transforms.AmplitudeToDB()(image)
print(image.shape)
# plot the spectrogram
plt.imshow(image[0, :, :].numpy())
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.show()
break
model_Type = "resnet50"
feature = "baseline_model"
rd = np.random.randint(100000)
modelName = f"{model_Type}_{feature}_{rd}"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_pretrained_model(model_name):
if model_name == "ViT":
model = torch.hub.load(
"facebookresearch/deit:main", "deit_tiny_patch16_224", pretrained=True
)
for param in model.parameters(): # freeze model
param.requires_grad = False
n_inputs = model.head.in_features
model.head = nn.Sequential(
nn.Linear(n_inputs, 512), nn.ReLU(), nn.Dropout(0.3), nn.Linear(512, 7)
)
if model_name == "MaxViT":
model = timm.create_model("maxvit_tiny_rw_224", pretrained=False, img_size=224)
model.head.fc = nn.Linear(512, 7, bias=True)
if model_name == "vgg16":
model = models.vgg16(pretrained=True)
n_inputs = model.classifier[6].in_features
model.classifier[6] = nn.Sequential(
nn.Linear(n_inputs, 2048),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(2048, 4),
nn.LogSoftmax(dim=1),
)
if model_name == "resnet50":
model = models.resnet50(pretrained=True)
n_inputs = model.fc.in_features
model.fc = nn.Sequential(
nn.Linear(n_inputs, config["classes_number"]),
nn.LogSoftmax(dim=1),
)
if model_name == "alexnet":
model = models.AlexNet()
model.classifier[-1] = nn.Sequential(nn.Linear(4096, 7), nn.LogSoftmax(dim=1))
# Move to gpu and parallelize
model = model.to(device)
return model
model = get_pretrained_model(model_Type)
def train(
model,
criterion,
optimizer,
train_loader,
valid_loader,
save_file_name,
max_epochs_stop=3,
n_epochs=20,
print_every=1,
):
"""Train a PyTorch Model
Params
--------
model (PyTorch model): cnn to train
criterion (PyTorch loss): objective to minimize
    optimizer (PyTorch optimizer): optimizer to compute gradients of model parameters
train_loader (PyTorch dataloader): training dataloader to iterate through
valid_loader (PyTorch dataloader): validation dataloader used for early stopping
save_file_name (str ending in '.pt'): file path to save the model state dict
max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping
n_epochs (int): maximum number of training epochs
print_every (int): frequency of epochs to print training stats
Returns
--------
model (PyTorch model): trained cnn with best weights
history (DataFrame): history of train and validation loss and accuracy
"""
    # Early stopping initialization
epochs_no_improve = 0
valid_loss_min = np.Inf
valid_acc_max = 0
valid_f1_max = 0
valid_max_acc = 0
history = []
# Number of epochs already trained (if using loaded in model weights)
try:
print(f"Model has been trained for: {model.epochs} epochs.\n")
except:
model.epochs = 0
print(f"Starting Training from Scratch.\n")
overall_start = timer()
# Main loop
for epoch in range(n_epochs):
# keep track of training and validation loss each epoch
train_loss = 0.0
valid_loss = 0.0
train_acc = 0
valid_acc = 0
Train_f1_sc = 0
Valid_f1_sc = 0
# Set to training
model.train()
start = timer()
# Training loop
for ii, (data, target) in enumerate(train_loader):
# Tensors to gpu
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# Clear gradients
optimizer.zero_grad()
# Predicted outputs are log probabilities
output = model(data) # .sigmoid()
# target = target.unsqueeze(1)
# Loss and backpropagation of gradients
loss = criterion(output, target)
loss.backward()
# Update the parameters
optimizer.step()
# Track train loss by multiplying average loss by number of examples in batch
train_loss += loss.item() * data.size(0)
# Calculate accuracy by finding max log probability
_, pred = torch.max(output, dim=1)
# pred = torch.ge(output, 0.35)
correct_tensor = pred.eq(target.data.view_as(pred))
# Need to convert correct tensor from int to float to average
accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
# Multiply average accuracy times the number of examples in batch
Train_f1_sc = f1_score(target.cpu().data, pred.cpu(), average="macro")
train_acc += accuracy.item() * data.size(0)
# Track training progress
print(
f"Epoch: {epoch}\t{100 * (ii + 1) / len(train_loader):.2f}% complete. {timer() - start:.2f} seconds elapsed in epoch.",
end="\r",
)
# After training loops ends, start validation
else:
model.epochs += 1
# Don't need to keep track of gradients
with torch.no_grad():
# Set to evaluation mode
model.eval()
# Validation loop
for data, target in valid_loader:
# Tensors to gpu
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# Forward pass
# output = model(data)
# Validation loss
output = model(data) # .sigmoid()
# target = target.unsqueeze(1)
# Loss and backpropagation of gradients
loss = criterion(output, target)
# Multiply average loss times the number of examples in batch
valid_loss += loss.item() * data.size(0)
# Calculate validation accuracy
_, pred = torch.max(output, dim=1)
# pred = torch.ge(output, 0.35)
correct_tensor = pred.eq(target.data.view_as(pred))
accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
# Multiply average accuracy times the number of examples
valid_acc += accuracy.item() * data.size(0)
Valid_f1_sc = f1_score(
target.cpu().data, pred.cpu(), average="macro"
)
# Calculate average losses
train_loss = train_loss / len(train_loader.dataset)
valid_loss = valid_loss / len(valid_loader.dataset)
# Calculate average accuracy
train_acc = train_acc / len(train_loader.dataset)
valid_acc = valid_acc / len(valid_loader.dataset)
history.append(
[
train_loss,
valid_loss,
train_acc,
valid_acc,
Train_f1_sc,
Valid_f1_sc,
]
)
# Print training and validation results
if (epoch + 1) % print_every == 0:
print(
f"\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}"
)
print(
f"\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Train F1 score: {100 * Train_f1_sc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%\t Validation F1 score: {100 * Valid_f1_sc:.2f}%"
)
# Save the model if validation loss decreases
if valid_loss < valid_loss_min:
# if Valid_f1_sc > valid_f1_max:
# Save model
torch.save(model.state_dict(), save_file_name)
# Track improvement
epochs_no_improve = 0
valid_loss_min = valid_loss
valid_acc_max = valid_acc
valid_f1_max = Valid_f1_sc
# valid_best_acc = valid_acc
best_epoch = epoch
# Otherwise increment count of epochs with no improvement
elif valid_loss >= valid_loss_min:
epochs_no_improve += 1
# Trigger early stopping
if epochs_no_improve >= max_epochs_stop:
print(
f"\nEarly Stopping! Total epochs: {epoch}. Best epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc_max:.2f}%"
)
total_time = timer() - overall_start
print(
f"{total_time:.2f} total seconds elapsed. {total_time / (epoch+1):.2f} seconds per epoch."
)
# Load the best state dict
model.load_state_dict(torch.load(save_file_name))
# Attach the optimizer
model.optimizer = optimizer
# Format history
history = pd.DataFrame(
history,
columns=[
"train_loss",
"valid_loss",
"train_acc",
"valid_acc",
"train_f1",
"valid_f1",
],
)
return model, history
# Attach the optimizer
model.optimizer = optimizer
# Record overall time and print out stats
total_time = timer() - overall_start
print(
f"\nBest epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc:.2f}%"
)
print(
f"{total_time:.2f} total seconds elapsed. {total_time / (epoch):.2f} seconds per epoch."
)
# Format history
history = pd.DataFrame(
history,
columns=[
"train_loss",
"valid_loss",
"train_acc",
"valid_acc",
"train_f1",
"valid_f1",
],
)
return model, history
# criterion = nn.CrossEntropyLoss(weight = torch.FloatTensor(class_w).to(device))
criterion = nn.CrossEntropyLoss()
# criterion =torch.nn.BCEWithLogitsLoss(pos_weight = torch.tensor(weight_for_1).to(device))
# criterion =torch.nn.BCEWithLogitsLoss()
# criterion = LabelSmoothingCrossEntropy(weight = class_w)
criterion = criterion.to("cuda")
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
# optimizer = optim.Adam(model.parameters(), lr=0.003)
# exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.97)
model, history = train(
model,
criterion,
optimizer,
train_loader,
valid_loader,
save_file_name=f"{modelName}.pt",
max_epochs_stop=4,
n_epochs=10,
print_every=1,
)
device
| false | 0 | 4,946 | 0 | 4,946 | 4,946 |
||
129080661
|
<jupyter_start><jupyter_text>Iris Species
The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].
It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.
The columns in this dataset are:
- Id
- SepalLengthCm
- SepalWidthCm
- PetalLengthCm
- PetalWidthCm
- Species
[](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)
[1]: http://archive.ics.uci.edu/ml/
Kaggle dataset identifier: iris
<jupyter_script># # EDA: Iris Data
# Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import statsmodels.api as sm
import plotly.express as px
# Settings
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
pd.options.display.float_format = "{:.2f}".format
plt.style.use("ggplot")
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
newcols = [
"id",
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"species",
]
df.columns = newcols
df.head()
df.shape
df.columns
df.loc[:, "species"].value_counts()
#
# Introduction to IRIS dataset and 2D scatter plot
#
sns.relplot(data=df, x="sepal_length", y="sepal_width", hue="species")
sns.relplot(data=df, x="petal_length", y="petal_width", hue="species")
# 3D scatter plot
fig = px.scatter_3d(
df, x="sepal_length", y="sepal_width", z="petal_length", color="species"
)
fig.show()
# PairPlots
sns.pairplot(df, hue="species")
# Limitations of Pair Plots: they only show pairwise (2-D) relationships, so they become hard to read as the number of features grows
# Histogram and Introduction to PDF (Probability Density Function)
# * Histogram: How many points exist for each value on the x-axis.
# * PDF: Smoothed form of histogram
sns.displot(df, x="petal_length", hue="species", kind="kde", fill=True)
# Univariate Analysis using PDF
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))
sns.kdeplot(df, x="petal_length", hue="species", fill=True, ax=ax[0, 0])
sns.kdeplot(df, x="petal_width", hue="species", fill=True, ax=ax[0, 1])
sns.kdeplot(df, x="sepal_length", hue="species", fill=True, ax=ax[1, 0])
sns.kdeplot(df, x="sepal_width", hue="species", fill=True, ax=ax[1, 1])
# Cumulative Distribution Function (CDF)
# * What fraction of observations have a value less than or equal to a given value
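# A minimal sketch of what the ECDF below plots (illustrative, independent of the
# seaborn call): sort the values, then the y-axis is rank / n, i.e. the fraction of
# observations less than or equal to each value.
_vals = np.sort(df["petal_length"].to_numpy())
_ecdf_y = np.arange(1, len(_vals) + 1) / len(_vals)
# plt.step(_vals, _ecdf_y, where="post") would draw essentially the same curve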
sns.ecdfplot(data=df, x="petal_length", stat="proportion")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080661.ipynb
|
iris
| null |
[{"Id": 129080661, "ScriptId": 38285178, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 473615, "CreationDate": "05/10/2023 20:55:57", "VersionNumber": 2.0, "Title": "EDA: Iris Data", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 105.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 40.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184817708, "KernelVersionId": 129080661, "SourceDatasetVersionId": 420}]
|
[{"Id": 420, "DatasetId": 19, "DatasourceVersionId": 420, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2016 07:38:05", "VersionNumber": 2.0, "Title": "Iris Species", "Slug": "iris", "Subtitle": "Classify iris plants into three species in this classic dataset", "Description": "The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].\n\nIt includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.\n\nThe columns in this dataset are:\n\n - Id\n - SepalLengthCm\n - SepalWidthCm\n - PetalLengthCm\n - PetalWidthCm\n - Species\n\n[](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)\n\n\n [1]: http://archive.ics.uci.edu/ml/", "VersionNotes": "Republishing files so they're formally in our system", "TotalCompressedBytes": 15347.0, "TotalUncompressedBytes": 15347.0}]
|
[{"Id": 19, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 420.0, "CurrentDatasourceVersionId": 420.0, "ForumId": 997, "Type": 2, "CreationDate": "01/12/2016 00:33:31", "LastActivityDate": "02/06/2018", "TotalViews": 1637863, "TotalDownloads": 423540, "TotalVotes": 3416, "TotalKernels": 6420}]
| null |
# # EDA: Iris Data
# Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import statsmodels.api as sm
import plotly.express as px
# Settings
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
pd.options.display.float_format = "{:.2f}".format
plt.style.use("ggplot")
df = pd.read_csv("/kaggle/input/iris/Iris.csv")
newcols = [
"id",
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"species",
]
df.columns = newcols
df.head()
df.shape
df.columns
df.loc[:, "species"].value_counts()
#
# Introduction to IRIS dataset and 2D scatter plot
#
sns.relplot(data=df, x="sepal_length", y="sepal_width", hue="species")
sns.relplot(data=df, x="petal_length", y="petal_width", hue="species")
# 3D scatter plot
fig = px.scatter_3d(
df, x="sepal_length", y="sepal_width", z="petal_length", color="species"
)
fig.show()
# PairPlots
sns.pairplot(df, hue="species")
# Limitations of Pair Plots: they only show pairwise (2-D) relationships, so they become hard to read as the number of features grows
# Histogram and Introduction to PDF (Probability Density Function)
# * Histogram: How many points exist for each value on the x-axis.
# * PDF: Smoothed form of histogram
sns.displot(df, x="petal_length", hue="species", kind="kde", fill=True)
# Univariate Analysis using PDF
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))
sns.kdeplot(df, x="petal_length", hue="species", fill=True, ax=ax[0, 0])
sns.kdeplot(df, x="petal_width", hue="species", fill=True, ax=ax[0, 1])
sns.kdeplot(df, x="sepal_length", hue="species", fill=True, ax=ax[1, 0])
sns.kdeplot(df, x="sepal_width", hue="species", fill=True, ax=ax[1, 1])
# Cumulative Distribution Function (CDF)
# * What fraction of observations have a value less than or equal to a given value
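# A minimal sketch of what the ECDF below plots (illustrative, independent of the
# seaborn call): sort the values, then the y-axis is rank / n, i.e. the fraction of
# observations less than or equal to each value.
_vals = np.sort(df["petal_length"].to_numpy())
_ecdf_y = np.arange(1, len(_vals) + 1) / len(_vals)
# plt.step(_vals, _ecdf_y, where="post") would draw essentially the same curve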
sns.ecdfplot(data=df, x="petal_length", stat="proportion")
| false | 0 | 640 | 0 | 938 | 640 |
||
129163813
|
<jupyter_start><jupyter_text>Advertising dataset
This data expresses sales according to the type of advertisement and the size of the cost .
The dataset contains 200 rows of 3 features [ TV , Radio , Newspaper] and target variable [Sales].
Kaggle dataset identifier: advertising-dataset
<jupyter_script>import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import math
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
reklamveri = pd.read_csv("/kaggle/input/advertising-dataset/Advertising.csv")
reklamveri.head()
reklamveri.tail()
reklamveri.shape
reklamveri.columns
reklamveri.rename(
columns={
"Unnamed: 0": "unnamed:0",
"TV": "Televizyon",
"Radio": "Radio",
"Newspaper": "Gazete",
"Sales": "Satış",
},
inplace=True,
)
reklamveri = reklamveri.drop(["unnamed:0"], axis=1)
reklamveri.info()
reklamveri.isnull().sum()
corr = reklamveri.corr()
corr
sns.heatmap(corr, annot=True)
sns.scatterplot(x="Televizyon", y="Satış", data=reklamveri)
sns.scatterplot(x="Radio", y="Satış", data=reklamveri)
reklamveri.describe().T
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Televizyon"], hist=True)
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Radio"], hist=True)
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Gazete"], hist=True)
def satış_grup_(x):
if x <= 2:
x = "0-2sg"
elif x > 2 and x <= 7:
x = "3-7sg"
elif x > 7 and x <= 12:
x = "8-12sg"
elif x > 12 and x <= 17:
x = "13-17sg"
elif x > 17 and x <= 22:
x = "18-22sg"
else:
x = "22sg+"
return x
reklamveri["satış_grup"] = reklamveri["Satış"].apply(satış_grup_)
reklamveri.groupby("satış_grup")["Satış"].agg(["count"])
plt.figure(figsize=(12, 5))
sns.countplot(x="satış_grup", data=reklamveri)
sns.pairplot(reklamveri)
reklamveri = reklamveri.drop(["satış_grup"], axis=1)
x = reklamveri.drop(["Satış"], axis=1)
y = reklamveri["Satış"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.10, random_state=41
)
# DecisionTreeRegressor
r_dt = DecisionTreeRegressor(random_state=0)
model = r_dt.fit(x_train, y_train)
y_pred = r_dt.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
# RandomForestRegressor
rf_reg = RandomForestRegressor(n_estimators=10, random_state=0)
model = rf_reg.fit(x_train, y_train)
y_pred = rf_reg.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
# KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2)
neigh.fit(x_train, y_train)
y_pred = neigh.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
score_list = []
for each in range(1, 15):
knn2 = KNeighborsRegressor(n_neighbors=each)
knn2.fit(x_train, y_train)
score_list.append(knn2.score(x_test, y_test))
plt.plot(range(1, 15), score_list)
plt.xlabel("k values")
plt.ylabel("accuracy")
plt.show
neigh = KNeighborsRegressor(n_neighbors=1)
neigh.fit(x_train, y_train)
y_pred = neigh.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
knn = KNeighborsRegressor()
knn_params = {"n_neighbors": np.arange(1, 11, 1)}
knn_cv_model = GridSearchCV(knn, knn_params, cv=10)
knn_cv_model.fit(x_train, y_train)
knn_cv_model.best_params_["n_neighbors"]
knn_cv_model.best_estimator_
knn_cv_model.best_score_
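# A minimal sketch (not part of the original notebook): evaluate the grid-searched model,
# which GridSearchCV has already refit on the full training split, on the held-out test set.
y_pred_tuned = knn_cv_model.predict(x_test)
print("R2 score (tuned KNN):", r2_score(y_test, y_pred_tuned))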
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/163/129163813.ipynb
|
advertising-dataset
|
tawfikelmetwally
|
[{"Id": 129163813, "ScriptId": 38398333, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9623697, "CreationDate": "05/11/2023 13:13:35", "VersionNumber": 1.0, "Title": "Advertising dataset (%95 Accuracy", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 174.0, "LinesInsertedFromPrevious": 174.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 184968141, "KernelVersionId": 129163813, "SourceDatasetVersionId": 5604378}]
|
[{"Id": 5604378, "DatasetId": 3223827, "DatasourceVersionId": 5679433, "CreatorUserId": 12641535, "LicenseName": "CC BY-SA 4.0", "CreationDate": "05/04/2023 21:48:38", "VersionNumber": 1.0, "Title": "Advertising dataset", "Slug": "advertising-dataset", "Subtitle": "sales prediction using linear regression", "Description": "This data expresses sales according to the type of advertisement and the size of the cost .\nThe dataset contains 200 rows of 3 features [ TV , Radio , Newspaper] and target variable [Sales].", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3223827, "CreatorUserId": 12641535, "OwnerUserId": 12641535.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5604378.0, "CurrentDatasourceVersionId": 5679433.0, "ForumId": 3288817, "Type": 2, "CreationDate": "05/04/2023 21:48:38", "LastActivityDate": "05/04/2023", "TotalViews": 5648, "TotalDownloads": 883, "TotalVotes": 28, "TotalKernels": 14}]
|
[{"Id": 12641535, "UserName": "tawfikelmetwally", "DisplayName": "tawfik elmetwally", "RegisterDate": "11/27/2022", "PerformanceTier": 2}]
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import math
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
reklamveri = pd.read_csv("/kaggle/input/advertising-dataset/Advertising.csv")
reklamveri.head()
reklamveri.tail()
reklamveri.shape
reklamveri.columns
reklamveri.rename(
columns={
"Unnamed: 0": "unnamed:0",
"TV": "Televizyon",
"Radio": "Radio",
"Newspaper": "Gazete",
"Sales": "Satış",
},
inplace=True,
)
reklamveri = reklamveri.drop(["unnamed:0"], axis=1)
reklamveri.info()
reklamveri.isnull().sum()
corr = reklamveri.corr()
corr
sns.heatmap(corr, annot=True)
sns.scatterplot(x="Televizyon", y="Satış", data=reklamveri)
sns.scatterplot(x="Radio", y="Satış", data=reklamveri)
reklamveri.describe().T
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Televizyon"], hist=True)
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Radio"], hist=True)
plt.figure(figsize=(6, 6))
sns.distplot(reklamveri["Gazete"], hist=True)
def satış_grup_(x):
if x <= 2:
x = "0-2sg"
elif x > 2 and x <= 7:
x = "3-7sg"
elif x > 7 and x <= 12:
x = "8-12sg"
elif x > 12 and x <= 17:
x = "13-17sg"
elif x > 17 and x <= 22:
x = "18-22sg"
else:
x = "22sg+"
return x
reklamveri["satış_grup"] = reklamveri["Satış"].apply(satış_grup_)
reklamveri.groupby("satış_grup")["Satış"].agg(["count"])
plt.figure(figsize=(12, 5))
sns.countplot(x="satış_grup", data=reklamveri)
sns.pairplot(reklamveri)
reklamveri = reklamveri.drop(["satış_grup"], axis=1)
x = reklamveri.drop(["Satış"], axis=1)
y = reklamveri["Satış"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.10, random_state=41
)
# DecisionTreeRegressor
r_dt = DecisionTreeRegressor(random_state=0)
model = r_dt.fit(x_train, y_train)
y_pred = r_dt.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
# RandomForestRegressor
rf_reg = RandomForestRegressor(n_estimators=10, random_state=0)
model = rf_reg.fit(x_train, y_train)
y_pred = rf_reg.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
# KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2)
neigh.fit(x_train, y_train)
y_pred = neigh.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
score_list = []
for each in range(1, 15):
knn2 = KNeighborsRegressor(n_neighbors=each)
knn2.fit(x_train, y_train)
score_list.append(knn2.score(x_test, y_test))
plt.plot(range(1, 15), score_list)
plt.xlabel("k values")
plt.ylabel("accuracy")
plt.show
neigh = KNeighborsRegressor(n_neighbors=1)
neigh.fit(x_train, y_train)
y_pred = neigh.predict(x_test)
print("R2 score:", r2_score(y_test, y_pred))
knn = KNeighborsRegressor()
knn_params = {"n_neighbors": np.arange(1, 11, 1)}
knn_cv_model = GridSearchCV(knn, knn_params, cv=10)
knn_cv_model.fit(x_train, y_train)
knn_cv_model.best_params_["n_neighbors"]
knn_cv_model.best_estimator_
knn_cv_model.best_score_
MAE = mean_absolute_error(y_test, y_pred)
print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE)
MSE = mean_squared_error(y_test, y_pred)
print("Ortalama Kare Hata (Mean Squared Error):", MSE)
RMSE = math.sqrt(MSE)
print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
| false | 1 | 1,608 | 1 | 1,676 | 1,608 |
||
129019596
|
<jupyter_start><jupyter_text>Forest Fire Classification
Kaggle dataset identifier: forest-fire-classification
<jupyter_script>data_dir = "/kaggle/input/forest-fire-classification/Forest_Fire"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train = ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test = ImageDataGenerator(rescale=1.0 / 255)
traindata = train.flow_from_directory(
"/kaggle/input/forest-fire-classification/Forest_Fire/Training and Validation",
target_size=(128, 128),
batch_size=32,
class_mode="categorical",
)
testdata = test.flow_from_directory(
"/kaggle/input/forest-fire-classification/Forest_Fire/Testing",
target_size=(128, 128),
batch_size=32,
class_mode="categorical",
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Softmax, Activation
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
import tensorflow as tf
# ***VGG NET 19***
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
def VGG19(input_shape, num_classes):
model = tf.keras.Sequential()
# Block 1
model.add(
Conv2D(64, (3, 3), activation="relu", padding="same", input_shape=input_shape)
)
model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 2
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 3
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 4
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 5
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Dense Layers
model.add(Flatten())
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation="softmax"))
return model
input_shape = (128, 128, 3)
num_classes = 2
model = VGG19(input_shape, num_classes)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model1 = model.fit(traindata, epochs=5, validation_data=testdata, batch_size=32)
import pandas as pd
x = model1.history
# Load the history into a pandas Dataframe
df = pd.DataFrame(x)
df.head()
plt.plot(model1.history["accuracy"])
plt.plot(model1.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(model1.history["loss"])
plt.plot(model1.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
import cv2
# read a sample test image (assuming the same nofire/abc358.jpg file exists under the Kaggle copy of the dataset)
t = cv2.imread(
    "/kaggle/input/forest-fire-classification/Forest_Fire/Testing/nofire/abc358.jpg"
)
plt.imshow(t)
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
testimg = cv2.resize(t, (128, 128))
testimg = img_to_array(testimg) / 255
h = np.expand_dims(testimg, axis=0)
r = model.predict(h)
classnames = ["fire", "nofire"]
ypred = classnames[np.argmax(r)]
ypred
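# A safer variant (a sketch, not in the original notebook): derive the label names from the
# training generator instead of assuming the hard-coded ["fire", "nofire"] ordering matches it.
idx_to_class = {v: k for k, v in traindata.class_indices.items()}
print("predicted class:", idx_to_class[int(np.argmax(r))])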
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019596.ipynb
|
forest-fire-classification
|
seebicb
|
[{"Id": 129019596, "ScriptId": 38294738, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14551355, "CreationDate": "05/10/2023 11:03:23", "VersionNumber": 1.0, "Title": "Forest Fire Classification || VGG19", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184706482, "KernelVersionId": 129019596, "SourceDatasetVersionId": 5621471}]
|
[{"Id": 5621471, "DatasetId": 3232225, "DatasourceVersionId": 5696667, "CreatorUserId": 14551355, "LicenseName": "Unknown", "CreationDate": "05/06/2023 22:09:59", "VersionNumber": 1.0, "Title": "Forest Fire Classification", "Slug": "forest-fire-classification", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3232225, "CreatorUserId": 14551355, "OwnerUserId": 14551355.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5621471.0, "CurrentDatasourceVersionId": 5696667.0, "ForumId": 3297373, "Type": 2, "CreationDate": "05/06/2023 22:09:59", "LastActivityDate": "05/06/2023", "TotalViews": 113, "TotalDownloads": 9, "TotalVotes": 1, "TotalKernels": 4}]
|
[{"Id": 14551355, "UserName": "seebicb", "DisplayName": "Haseeb Ahmed", "RegisterDate": "04/08/2023", "PerformanceTier": 0}]
|
data_dir = "/kaggle/input/forest-fire-classification/Forest_Fire"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train = ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test = ImageDataGenerator(rescale=1.0 / 255)
traindata = train.flow_from_directory(
"/kaggle/input/forest-fire-classification/Forest_Fire/Training and Validation",
target_size=(128, 128),
batch_size=32,
class_mode="categorical",
)
testdata = test.flow_from_directory(
"/kaggle/input/forest-fire-classification/Forest_Fire/Testing",
target_size=(128, 128),
batch_size=32,
class_mode="categorical",
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Softmax, Activation
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
import tensorflow as tf
# ***VGG NET 19***
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
def VGG19(input_shape, num_classes):
model = tf.keras.Sequential()
# Block 1
model.add(
Conv2D(64, (3, 3), activation="relu", padding="same", input_shape=input_shape)
)
model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 2
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 3
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 4
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Block 5
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(Conv2D(512, (3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# Dense Layers
model.add(Flatten())
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(4096, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation="softmax"))
return model
input_shape = (128, 128, 3)
num_classes = 2
model = VGG19(input_shape, num_classes)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model1 = model.fit(traindata, epochs=5, validation_data=testdata, batch_size=32)
import pandas as pd
x = model1.history
# Load the history into a pandas Dataframe
df = pd.DataFrame(x)
df.head()
plt.plot(model1.history["accuracy"])
plt.plot(model1.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(model1.history["loss"])
plt.plot(model1.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["Train", "Validation"], loc="upper left")
plt.show()
import cv2
# read a sample test image (assuming the same nofire/abc358.jpg file exists under the Kaggle copy of the dataset)
t = cv2.imread(
    "/kaggle/input/forest-fire-classification/Forest_Fire/Testing/nofire/abc358.jpg"
)
plt.imshow(t)
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
testimg = cv2.resize(t, (128, 128))
testimg = img_to_array(testimg) / 255
h = np.expand_dims(testimg, axis=0)
r = model.predict(h)
classnames = ["fire", "nofire"]
ypred = classnames[np.argmax(r)]
ypred
| false | 0 | 1,474 | 3 | 1,496 | 1,474 |
||
129019255
|
"""
Python 3.10 Titanic Exploratory Data Analysis and visualization program will you survive on the titanic or not
File name Titanic_eda.py
Version: 0.1
Author: MLCV
Date: 2023-05-09
"""
# Importing Libraries
# visualization
from plotnine import * # plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot.
import matplotlib.pyplot as plt # collection of command style functions that make matplotlib work like MATLAB
import seaborn as sns # statistical data visualization
# data analysis
import pandas as pd # is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, built on top of the Python programming language
import numpy as np # offers comprehensive mathematical functions, random number generators, linear algebra routines, Fourier transforms, and more
import warnings # Base category for warnings triggered during the process of importing a module (ignored by default)
warnings.filterwarnings("ignore") # here we prescribe the action in case of an error.
"""let's create a function, feed the training and test data sets as an input,
and at the output we will get a combined one, without an index"""
def concat_df(train_data, test_data):
# Returns a concatenated df of training and test set
return pd.concat([train_data, test_data], sort=True).reset_index(drop=True)
"""Let's create a function at the input of which we feed the combined data set,
and at the output it returns the separated df of the training and test set, saved without a label"""
def divide_df(all_data):
# Returns divided dfs of training and test set
return all_data.loc[:890], all_data.loc[891:].drop(["Survived"], axis=1)
"""The Python Pandas packages helps us work with our datasets.
We start by acquiring the training and testing datasets into Pandas DataFrames.
We also combine these datasets to run certain operations on both datasets together.
"""
df_train = pd.read_csv("/kaggle/input/titanic/train.csv") # load train data
df_test = pd.read_csv("/kaggle/input/titanic/test.csv") # load test data
df_all = concat_df(
df_train, df_test
) # we apply the function described above, the union of two dataframes.
"""supplement the data sets with the name parameter"""
df_train.name = "Training Set" # set parameter for dataset - dataframe name
df_test.name = "Test Set" # set parameter for dataset - dataframe name
df_all.name = "All Set" # set parameter for dataset - dataframe name
dfs = [df_train, df_test]
# display information about datasets
print("Number of Training Examples = {}".format(df_train.shape[0]))
print("Number of Test Examples = {}\n".format(df_test.shape[0]))
print("Training X Shape = {}".format(df_train.shape))
print("Training y Shape = {}\n".format(df_train["Survived"].shape[0]))
print("Test X Shape = {}".format(df_test.shape))
print("Test y Shape = {}\n".format(df_test.shape[0]))
print(df_train.columns)
print(df_test.columns)
print(df_train.info())
print(df_train.describe())
df_train.sample(5)
"""Correlation Between The Features"""
sns.heatmap(df_train.corr(), annot=True, cmap="RdYlGn", linewidths=0.2)
fig = plt.gcf()
fig.set_size_inches(10, 8)
plt.show()
# function to analyze each column of the dataframe
def display_missing(df):
for col in df.columns.tolist():
print("{} column missing values: {}".format(col, df[col].isnull().sum()))
print("\n")
for df in dfs:
print("{}".format(df.name))
display_missing(df)
df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
df_train["Sex"].value_counts()
df_train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
df_train["Pclass"].value_counts()
df_train["Age"].value_counts()
df_train[["Age", "Survived"]].groupby(["Age"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_dropna_age = df_train["Age"].dropna()
df_train["Survived"] = df_train["Survived"].astype("category")
# Although it is a number, it is changed to a category because it is categorical data
df_train["Survived"] = df_train["Survived"].astype("category")
df_train.head(3)
df_train["Initial"] = 0
for i in df_train:
df_train["Initial"] = df_train.Name.str.extract(
"([A-Za-z]+)\."
) # lets extract the Salutations
"""We are using the Regex: [A-Za-z]+)..
So what it does is, it looks for strings which lie between A-Z or a-z and followed by a .(dot).
So we successfully extract the Initials from the Name."""
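# A tiny illustration of the extraction on one sample name (a sketch, not in the original notebook):
# "Braund, Mr. Owen Harris" -> the captured group is "Mr".
print(pd.Series(["Braund, Mr. Owen Harris"]).str.extract(r"([A-Za-z]+)\."))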
pd.crosstab(df_train.Initial, df_train.Sex).T.style.background_gradient(
cmap="summer_r"
) # Checking the Initials with the Sex
df_train["Initial"].replace(
[
"Mlle",
"Mme",
"Ms",
"Dr",
"Major",
"Lady",
"Countess",
"Jonkheer",
"Col",
"Rev",
"Capt",
"Sir",
"Don",
],
[
"Miss",
"Miss",
"Miss",
"Mr",
"Mr",
"Mrs",
"Mrs",
"Other",
"Other",
"Other",
"Mr",
"Mr",
"Mr",
],
inplace=True,
)
df_train.groupby("Initial")["Age"].mean() # lets check the average age by Initials
# Assigning the NaN Values with the Ceil values of the mean ages
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mr"), "Age"] = 33
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mrs"), "Age"] = 36
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Master"), "Age"] = 5
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Miss"), "Age"] = 22
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Other"), "Age"] = 46
df_train.Age.isnull().any() # So no null values left finally
# print(df_all.info())
# Assign all the null values to N
df_all.Cabin.fillna("N", inplace=True)
# group these cabins according to the letter of the cabin name
df_all.Cabin = [str(i)[0] for i in df_all.Cabin]
def percent_value_counts(df, feature):
"""This function takes in a dataframe and a column and finds the percentage of the value_counts"""
percent = pd.DataFrame(
round(df.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2)
)
## creating a df with th
total = pd.DataFrame(df.loc[:, feature].value_counts(dropna=False))
## concating percent and total dataframe
total.columns = ["Total"]
percent.columns = ["Percent"]
return pd.concat([total, percent], axis=1)
percent_value_counts(df_all, "Cabin")
df_all.groupby("Cabin")["Fare"].mean().sort_values()
def cabin_estimator(i):
"""Grouping cabin feature by the first letter"""
a = 0
if i < 16:
a = "G"
elif i >= 16 and i < 27:
a = "F"
elif i >= 27 and i < 38:
a = "T"
elif i >= 38 and i < 47:
a = "A"
elif i >= 47 and i < 53:
a = "E"
elif i >= 53 and i < 54:
a = "D"
elif i >= 54 and i < 116:
a = "C"
else:
a = "B"
return a
"""Now, these means can help us determine the unknown cabins, if we compare each unknown cabin rows with the given mean's above.
Let's write a simple function so that we can give cabin names based on the means."""
# applying cabin estimator function.
df_all["Cabin"] = df_all.Fare.apply(lambda x: cabin_estimator(x))
percent_value_counts(df_all, "Cabin")
df_all["Fare"] = pd.qcut(df_all["Fare"], 13)
fig, axs = plt.subplots(figsize=(22, 9))
sns.countplot(x="Fare", hue="Survived", data=df_all)
plt.xlabel("Fare", size=15, labelpad=20)
plt.ylabel("Passenger Count", size=15, labelpad=20)
plt.tick_params(axis="x", labelsize=10)
plt.tick_params(axis="y", labelsize=15)
plt.legend(["Not Survived", "Survived"], loc="upper right", prop={"size": 15})
plt.title("Count of Survival in {} Feature".format("Fare"), size=15, y=1.05)
plt.show()
df_train["Embarked"].value_counts()
df_train[df_train.Embarked.isnull()]
"""
We may be able to resolve these two missing values by looking at the other independent variables of the two rows.
Both passengers paid a fare of $80, are of Pclass 1 and female Sex.
The average fare closest to $80 occurs for the "C" Embarked value where Pclass is 1, so let's fill in the missing values with "C".
"""
df_train["Embarked"].fillna("C", inplace=True)
df_train["Embarked"].value_counts()
df_train[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean()
pd.crosstab(
[df_train.Embarked, df_train.Pclass],
[df_train.Sex, df_train.Survived],
margins=True,
).style.background_gradient(cmap="summer_r")
df_train.head(3)
output = pd.DataFrame(
{"PassengerId": df_train.PassengerId, "Survived": df_train.Survived}
)
output.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019255.ipynb
| null | null |
[{"Id": 129019255, "ScriptId": 38350646, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15035340, "CreationDate": "05/10/2023 11:00:15", "VersionNumber": 4.0, "Title": "Titanic", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 211.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
"""
Python 3.10 Titanic Exploratory Data Analysis and visualization program will you survive on the titanic or not
File name Titanic_eda.py
Version: 0.1
Author: MLCV
Date: 2023-05-09
"""
# Importing Libraries
# visualization
from plotnine import * # plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot.
import matplotlib.pyplot as plt # collection of command style functions that make matplotlib work like MATLAB
import seaborn as sns # statistical data visualization
# data analysis
import pandas as pd # is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, built on top of the Python programming language
import numpy as np # offers comprehensive mathematical functions, random number generators, linear algebra routines, Fourier transforms, and more
import warnings # Base category for warnings triggered during the process of importing a module (ignored by default)
warnings.filterwarnings("ignore") # here we prescribe the action in case of an error.
"""let's create a function, feed the training and test data sets as an input,
and at the output we will get a combined one, without an index"""
def concat_df(train_data, test_data):
# Returns a concatenated df of training and test set
return pd.concat([train_data, test_data], sort=True).reset_index(drop=True)
"""Let's create a function at the input of which we feed the combined data set,
and at the output it returns the separated df of the training and test set, saved without a label"""
def divide_df(all_data):
# Returns divided dfs of training and test set
return all_data.loc[:890], all_data.loc[891:].drop(["Survived"], axis=1)
"""The Python Pandas packages helps us work with our datasets.
We start by acquiring the training and testing datasets into Pandas DataFrames.
We also combine these datasets to run certain operations on both datasets together.
"""
df_train = pd.read_csv("/kaggle/input/titanic/train.csv") # load train data
df_test = pd.read_csv("/kaggle/input/titanic/test.csv") # load test data
df_all = concat_df(
df_train, df_test
) # we apply the function described above, the union of two dataframes.
"""supplement the data sets with the name parameter"""
df_train.name = "Training Set" # set parameter for dataset - dataframe name
df_test.name = "Test Set" # set parameter for dataset - dataframe name
df_all.name = "All Set" # set parameter for dataset - dataframe name
dfs = [df_train, df_test]
# display information about datasets
print("Number of Training Examples = {}".format(df_train.shape[0]))
print("Number of Test Examples = {}\n".format(df_test.shape[0]))
print("Training X Shape = {}".format(df_train.shape))
print("Training y Shape = {}\n".format(df_train["Survived"].shape[0]))
print("Test X Shape = {}".format(df_test.shape))
print("Test y Shape = {}\n".format(df_test.shape[0]))
print(df_train.columns)
print(df_test.columns)
print(df_train.info())
print(df_train.describe())
df_train.sample(5)
"""Correlation Between The Features"""
sns.heatmap(df_train.corr(), annot=True, cmap="RdYlGn", linewidths=0.2)
fig = plt.gcf()
fig.set_size_inches(10, 8)
plt.show()
# function to analyze each column of the dataframe
def display_missing(df):
for col in df.columns.tolist():
print("{} column missing values: {}".format(col, df[col].isnull().sum()))
print("\n")
for df in dfs:
print("{}".format(df.name))
display_missing(df)
df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
df_train["Sex"].value_counts()
df_train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
df_train["Pclass"].value_counts()
df_train["Age"].value_counts()
df_train[["Age", "Survived"]].groupby(["Age"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_dropna_age = df_train["Age"].dropna()
df_train["Survived"] = df_train["Survived"].astype("category")
# Although it is a number, it is changed to a category because it is categorical data
df_train["Survived"] = df_train["Survived"].astype("category")
df_train.head(3)
df_train["Initial"] = 0
for i in df_train:
df_train["Initial"] = df_train.Name.str.extract(
"([A-Za-z]+)\."
) # lets extract the Salutations
"""We are using the Regex: [A-Za-z]+)..
So what it does is, it looks for strings which lie between A-Z or a-z and followed by a .(dot).
So we successfully extract the Initials from the Name."""
pd.crosstab(df_train.Initial, df_train.Sex).T.style.background_gradient(
cmap="summer_r"
) # Checking the Initials with the Sex
df_train["Initial"].replace(
[
"Mlle",
"Mme",
"Ms",
"Dr",
"Major",
"Lady",
"Countess",
"Jonkheer",
"Col",
"Rev",
"Capt",
"Sir",
"Don",
],
[
"Miss",
"Miss",
"Miss",
"Mr",
"Mr",
"Mrs",
"Mrs",
"Other",
"Other",
"Other",
"Mr",
"Mr",
"Mr",
],
inplace=True,
)
df_train.groupby("Initial")["Age"].mean() # lets check the average age by Initials
# Assigning the NaN Values with the Ceil values of the mean ages
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mr"), "Age"] = 33
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mrs"), "Age"] = 36
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Master"), "Age"] = 5
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Miss"), "Age"] = 22
df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Other"), "Age"] = 46
df_train.Age.isnull().any() # So no null values left finally
# print(df_all.info())
# Assign all the null values to N
df_all.Cabin.fillna("N", inplace=True)
# group these cabins according to the letter of the cabin name
df_all.Cabin = [str(i)[0] for i in df_all.Cabin]
def percent_value_counts(df, feature):
"""This function takes in a dataframe and a column and finds the percentage of the value_counts"""
percent = pd.DataFrame(
round(df.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2)
)
## creating a df with th
total = pd.DataFrame(df.loc[:, feature].value_counts(dropna=False))
## concating percent and total dataframe
total.columns = ["Total"]
percent.columns = ["Percent"]
return pd.concat([total, percent], axis=1)
percent_value_counts(df_all, "Cabin")
df_all.groupby("Cabin")["Fare"].mean().sort_values()
def cabin_estimator(i):
"""Grouping cabin feature by the first letter"""
a = 0
if i < 16:
a = "G"
elif i >= 16 and i < 27:
a = "F"
elif i >= 27 and i < 38:
a = "T"
elif i >= 38 and i < 47:
a = "A"
elif i >= 47 and i < 53:
a = "E"
elif i >= 53 and i < 54:
a = "D"
elif i >= 54 and i < 116:
a = "C"
else:
a = "B"
return a
"""Now, these means can help us determine the unknown cabins, if we compare each unknown cabin rows with the given mean's above.
Let's write a simple function so that we can give cabin names based on the means."""
# applying cabin estimator function.
df_all["Cabin"] = df_all.Fare.apply(lambda x: cabin_estimator(x))
percent_value_counts(df_all, "Cabin")
df_all["Fare"] = pd.qcut(df_all["Fare"], 13)
fig, axs = plt.subplots(figsize=(22, 9))
sns.countplot(x="Fare", hue="Survived", data=df_all)
plt.xlabel("Fare", size=15, labelpad=20)
plt.ylabel("Passenger Count", size=15, labelpad=20)
plt.tick_params(axis="x", labelsize=10)
plt.tick_params(axis="y", labelsize=15)
plt.legend(["Not Survived", "Survived"], loc="upper right", prop={"size": 15})
plt.title("Count of Survival in {} Feature".format("Fare"), size=15, y=1.05)
plt.show()
df_train["Embarked"].value_counts()
df_train[df_train.Embarked.isnull()]
"""
We may be able to resolve these two missing values by looking at the other independent variables of the two rows.
Both passengers paid a fare of $80, are of Pclass 1 and female Sex.
The average fare closest to $80 occurs for the "C" Embarked value where Pclass is 1, so let's fill in the missing values with "C".
"""
df_train["Embarked"].fillna("C", inplace=True)
df_train["Embarked"].value_counts()
df_train[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean()
pd.crosstab(
[df_train.Embarked, df_train.Pclass],
[df_train.Sex, df_train.Survived],
margins=True,
).style.background_gradient(cmap="summer_r")
df_train.head(3)
output = pd.DataFrame(
{"PassengerId": df_train.PassengerId, "Survived": df_train.Survived}
)
output.to_csv("submission.csv", index=False)
| false | 0 | 2,800 | 0 | 2,800 | 2,800 |
||
129019356
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
pd.set_option("max_columns", None)
pd.set_option("max_rows", 90)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.neighbors import KNeighborsRegressor
import scipy.stats
from sklearn.preprocessing import StandardScaler
from pycaret.regression import setup, compare_models
from sklearn.model_selection import KFold, cross_val_score
from catboost import CatBoostRegressor
from sklearn.linear_model import (
BayesianRidge,
HuberRegressor,
Ridge,
OrthogonalMatchingPursuit,
)
from lightgbm import LGBMRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
import optuna
train0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
sample_submission = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
train0
test0
sample_submission
target = train0["SalePrice"]
test_ids = test0["Id"]
train1 = train0.drop(["Id", "SalePrice"], axis=1)
test1 = test0.drop("Id", axis=1)
data1 = pd.concat([train1, test1], axis=0).reset_index(drop=True)
data1
target
# ## Cleaning
data2 = data1.copy()
data2["MSSubClass"] = data2["MSSubClass"].astype(str)
# Impute using a constant value
for column in [
"Alley",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]:
data2[column] = data2[column].fillna("None")
# Impute using the column mode
for column in [
"MSZoning",
"Utilities",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"Electrical",
"KitchenQual",
"Functional",
"SaleType",
]:
data2[column] = data2[column].fillna(data2[column].mode()[0])
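# Quick sanity check (a sketch, not part of the original notebook): the columns imputed
# above should no longer contain missing values.
print(data2[["Alley", "PoolQC", "MSZoning", "Electrical", "KitchenQual"]].isnull().sum())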
data3 = data2.copy()
import pandas as pd
data = {
"Name": ["John", "Mary", "Peter", "Sarah"],
"Age": [25, 30, 35, 40],
"Gender": ["M", "F", "M", "F"],
"Salary": [50000, 60000, 70000, 80000],
}
df = pd.DataFrame(data)
# select row with index 2 and all columns
row2 = df.loc[2, :]
# select all rows and columns 'Name' and 'Gender'
subset = df.loc[:, ["Name", "Gender"]]
subset
# select first two rows and first three columns
subset = df.iloc[:2, :3]
subset
# select last row and last column
cell = df.iloc[-1, -1]
cell
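# A small illustration of the label-vs-position distinction (a sketch, not in the original
# notebook): with a non-default index, .loc looks up labels while .iloc looks up positions.
df_named = df.set_index("Name")
print(df_named.loc["Peter", "Salary"])  # label-based lookup -> 70000
print(df_named.iloc[2, 2])  # position-based lookup -> also Peter's salary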
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019356.ipynb
| null | null |
[{"Id": 129019356, "ScriptId": 38308563, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12619548, "CreationDate": "05/10/2023 11:01:07", "VersionNumber": 2.0, "Title": "Hse_Predict", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 104.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 29.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
pd.set_option("max_columns", None)
pd.set_option("max_rows", 90)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.neighbors import KNeighborsRegressor
import scipy.stats
from sklearn.preprocessing import StandardScaler
from pycaret.regression import setup, compare_models
from sklearn.model_selection import KFold, cross_val_score
from catboost import CatBoostRegressor
from sklearn.linear_model import (
BayesianRidge,
HuberRegressor,
Ridge,
OrthogonalMatchingPursuit,
)
from lightgbm import LGBMRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
import optuna
train0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
sample_submission = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
train0
test0
sample_submission
target = train0["SalePrice"]
test_ids = test0["Id"]
train1 = train0.drop(["Id", "SalePrice"], axis=1)
test1 = test0.drop("Id", axis=1)
data1 = pd.concat([train1, test1], axis=0).reset_index(drop=True)
data1
target
# ## Cleaning
data2 = data1.copy()
data2["MSSubClass"] = data2["MSSubClass"].astype(str)
# Impute using a constant value
for column in [
"Alley",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]:
data2[column] = data2[column].fillna("None")
# Impute using the column mode
for column in [
"MSZoning",
"Utilities",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"Electrical",
"KitchenQual",
"Functional",
"SaleType",
]:
data2[column] = data2[column].fillna(data2[column].mode()[0])
data3 = data2.copy()
import pandas as pd
data = {
"Name": ["John", "Mary", "Peter", "Sarah"],
"Age": [25, 30, 35, 40],
"Gender": ["M", "F", "M", "F"],
"Salary": [50000, 60000, 70000, 80000],
}
df = pd.DataFrame(data)
# select row with index 2 and all columns
row2 = df.loc[2, :]
# select all rows and columns 'Name' and 'Gender'
subset = df.loc[:, ["Name", "Gender"]]
subset
# select first two rows and first three columns
subset = df.iloc[:2, :3]
subset
# select last row and last column
cell = df.iloc[-1, -1]
cell
| false | 0 | 1,001 | 0 | 1,001 | 1,001 |
||
129019808
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
df.info()
df.head()
# # Mean & Standard Deviation
mean = np.mean(df["Result"])
std = np.std(df["Result"])
print("Mean: ", mean)
print("Standard Deviation: ", std)
# # **Confidence Interval**
import numpy as np
from scipy.stats import t
# Calculate the mean and standard deviation of the dataset
# Set the level of confidence (1 - alpha)
conf_level = 0.95
# Calculate the sample size and degrees of freedom
n = len(df["Result"])
df = n - 1
# Calculate the t-statistic
t_stat = t.ppf((1 + conf_level) / 2, df)
# Calculate the margin of error
margin_of_error = t_stat * std / np.sqrt(n)
# Calculate the confidence interval
lower = mean - margin_of_error
upper = mean + margin_of_error
# Print the confidence interval
print(
"The 95% confidence interval for the mean is: ({:.2f}, {:.2f})".format(lower, upper)
)
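# Equivalent one-liner using scipy (a sketch, not in the original notebook); it should
# reproduce the interval computed above.
lo_sp, up_sp = t.interval(conf_level, n - 1, loc=mean, scale=std / np.sqrt(n))
print("scipy t.interval: ({:.2f}, {:.2f})".format(lo_sp, up_sp))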
# # **Hypothesis Testing**
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# Set the null hypothesis mean
null_mean = 0
# Set the level of significance (alpha)
alpha = 0.05
# Perform the t-test
t_stat, p_val = ttest_1samp(df["Result"], null_mean)
# Print the results
print("t-statistic: {:.2f}".format(t_stat))
print("p-value: {:.2f}".format(p_val))
# Check if the result is statistically significant
if p_val < alpha:
print("The result is statistically significant")
else:
print("The result is not statistically significant")
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# replace F with 1 and M with 0 in Gender column
df["Gender"] = df["Gender"].replace({"F": 1, "M": 0})
# convert Gender column to float64
df["Gender"] = df["Gender"].astype("float64")
df.head()
# # **Regression**
#
import pandas as pd
import statsmodels.api as sm
# Split the data into X and y
X = df["Result"]
y = df["Gender"]
# Add a constant to the X data
X = sm.add_constant(X)
# Create the linear regression model
model = sm.OLS(y, X).fit()
# Print the model summary
print(model.summary())
# # Anova for Result
import pandas as pd
from scipy import stats
# Remove rows with missing data
df.dropna(inplace=True)
# Separate the data by gender
bigResult = df[df["Result"] >= 4.0].iloc[:, 1:]
smallResult = df[df["Result"] < 4.0].iloc[:, 1:]
# Check for empty groups and remove them
if len(bigResult) == 0 or len(smallResult) == 0:
print("Error: one or more groups has no data.")
else:
# Perform ANOVA using the f_oneway() function
f_val, p_val = stats.f_oneway(bigResult, smallResult)
# Print the F-value and p-value to the console
print("F-value:", f_val)
print("p-value:", p_val)
# # **Anova for Gender**
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# replace F with 1 and M with 0 in Gender column
df["Gender"] = df["Gender"].replace({"F": 1, "M": 0})
# convert Gender column to float64
df["Gender"] = df["Gender"].astype("float64")
df.head()
import pandas as pd
from scipy import stats
# Remove rows with missing data
df.dropna(inplace=True)
# Separate the data by gender
maleData = df[df["Gender"] == 0.0].iloc[:, 1:]
femaleData = df[df["Gender"] == 1.0].iloc[:, 1:]
# Check for empty groups and remove them
if len(maleData) == 0 or len(femaleData) == 0:
print("Error: one or more groups has no data.")
else:
# Perform ANOVA using the f_oneway() function
f_val, p_val = stats.f_oneway(maleData, femaleData)
# Print the F-value and p-value to the console
print("F-value:", f_val)
print("p-value:", p_val)
# # **Graphs**
data = df["Result"].values
# Create a histogram plot with matplotlib
plt.hist(data, bins=9) # the number of bins can be adjusted as needed
# Add titles and labels to the plot
plt.title("Histogram of Reesults")
plt.xlabel("Data Values")
plt.ylabel("Frequency")
# Show the plot
plt.show()
data = df["Result"]
# Count the number of occurrences of each value in the column
counts = data.value_counts()
# Create a bar plot with matplotlib
counts.plot(kind="bar")
# Add titles and labels to the plot
plt.title("Bar Plot of Results")
plt.xlabel("Data Values")
plt.ylabel("Counts")
# Show the plot
plt.show()
data = df["Result"]
# Count the number of occurrences of each value in the column
counts = data.value_counts()
# Create a pie chart with matplotlib
counts.plot(kind="pie")
# Add titles and labels to the plot
plt.title("Pie Chart of Results")
plt.legend(labels=counts.index, loc="upper right")
# Show the plot
plt.show()
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# map 1 back to "F" and 0 back to "M" in the Gender column (a no-op if Gender is still stored as strings)
df["Gender"] = df["Gender"].replace({1: "F", 0: "M"})
# keep the Gender column as object (categorical strings) for the plots below
df["Gender"] = df["Gender"].astype("object")
df.head()
# Create a box plot using seaborn
sns.boxplot(x="Gender", y="Result", data=df)
# Create a violin plot using seaborn
sns.violinplot(x="Gender", y="Result", data=df)
# Show the plots
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019808.ipynb
| null | null |
[{"Id": 129019808, "ScriptId": 38206276, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13154053, "CreationDate": "05/10/2023 11:05:31", "VersionNumber": 1.0, "Title": "statistical analysis", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 250.0, "LinesInsertedFromPrevious": 250.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
df.info()
df.head()
# # Mean & Standard Deviation
mean = np.mean(df["Result"])
std = np.std(df["Result"])
print("Mean: ", mean)
print("Standard Deviation: ", std)
# # **Confidence Interval**
import numpy as np
from scipy.stats import t
# Calculate the mean and standard deviation of the dataset
# Set the level of confidence (1 - alpha)
conf_level = 0.95
# Calculate the sample size and degrees of freedom
n = len(df["Result"])
df = n - 1
# Calculate the t-statistic
t_stat = t.ppf((1 + conf_level) / 2, df)
# Calculate the margin of error
margin_of_error = t_stat * std / np.sqrt(n)
# Calculate the confidence interval
lower = mean - margin_of_error
upper = mean + margin_of_error
# Print the confidence interval
print(
"The 95% confidence interval for the mean is: ({:.2f}, {:.2f})".format(lower, upper)
)
# # **Hypothesis Testing**
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# Set the null hypothesis mean
null_mean = 0
# Set the level of significance (alpha)
alpha = 0.05
# Perform the t-test
t_stat, p_val = ttest_1samp(df["Result"], null_mean)
# Print the results
print("t-statistic: {:.2f}".format(t_stat))
print("p-value: {:.2f}".format(p_val))
# Check if the result is statistically significant
if p_val < alpha:
print("The result is statistically significant")
else:
print("The result is not statistically significant")
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# replace F with 1 and M with 0 in Gender column
df["Gender"] = df["Gender"].replace({"F": 1, "M": 0})
# convert Gender column to float64
df["Gender"] = df["Gender"].astype("float64")
df.head()
# # **Regression**
#
import pandas as pd
import statsmodels.api as sm
# Split the data into X and y
X = df["Result"]
y = df["Gender"]
# Add a constant to the X data
X = sm.add_constant(X)
# Create the linear regression model
model = sm.OLS(y, X).fit()
# Print the model summary
print(model.summary())
# # Anova for Result
import pandas as pd
from scipy import stats
# Remove rows with missing data
df.dropna(inplace=True)
# Separate the data by gender
bigResult = df[df["Result"] >= 4.0].iloc[:, 1:]
smallResult = df[df["Result"] < 4.0].iloc[:, 1:]
# Check for empty groups and remove them
if len(bigResult) == 0 or len(smallResult) == 0:
print("Error: one or more groups has no data.")
else:
# Perform ANOVA using the f_oneway() function
f_val, p_val = stats.f_oneway(bigResult, smallResult)
# Print the F-value and p-value to the console
print("F-value:", f_val)
print("p-value:", p_val)
# # **Anova for Gender**
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# replace F with 1 and M with 0 in Gender column
df["Gender"] = df["Gender"].replace({"F": 1, "M": 0})
# convert Gender column to float64
df["Gender"] = df["Gender"].astype("float64")
df.head()
import pandas as pd
from scipy import stats
# Remove rows with missing data
df.dropna(inplace=True)
# Separate the data by gender
maleData = df[df["Gender"] == 0.0].iloc[:, 1:]
femaleData = df[df["Gender"] == 1.0].iloc[:, 1:]
# Check for empty groups and remove them
if len(maleData) == 0 or len(femaleData) == 0:
print("Error: one or more groups has no data.")
else:
# Perform ANOVA using the f_oneway() function
f_val, p_val = stats.f_oneway(maleData, femaleData)
# Print the F-value and p-value to the console
print("F-value:", f_val)
print("p-value:", p_val)
# # **Graphs**
data = df["Result"].values
# Create a histogram plot with matplotlib
plt.hist(data, bins=9) # the number of bins can be adjusted as needed
# Add titles and labels to the plot
plt.title("Histogram of Reesults")
plt.xlabel("Data Values")
plt.ylabel("Frequency")
# Show the plot
plt.show()
data = df["Result"]
# Count the number of occurrences of each value in the column
counts = data.value_counts()
# Create a bar plot with matplotlib
counts.plot(kind="bar")
# Add titles and labels to the plot
plt.title("Bar Plot of Results")
plt.xlabel("Data Values")
plt.ylabel("Counts")
# Show the plot
plt.show()
data = df["Result"]
# Count the number of occurrences of each value in the column
counts = data.value_counts()
# Create a pie chart with matplotlib
counts.plot(kind="pie")
# Add titles and labels to the plot
plt.title("Pie Chart of Results")
plt.legend(labels=counts.index, loc="upper right")
# Show the plot
plt.show()
import pandas as pd
# your code for loading data
df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1")
# map 1 back to "F" and 0 back to "M" in the Gender column (a no-op if Gender is still stored as strings)
df["Gender"] = df["Gender"].replace({1: "F", 0: "M"})
# keep the Gender column as object (categorical strings) for the plots below
df["Gender"] = df["Gender"].astype("object")
df.head()
# Create a box plot using seaborn
sns.boxplot(x="Gender", y="Result", data=df)
# Create a violin plot using seaborn
sns.violinplot(x="Gender", y="Result", data=df)
# Show the plots
plt.show()
| false | 0 | 1,896 | 0 | 1,896 | 1,896 |
||
129019183
|
<jupyter_start><jupyter_text>Brain Tumor Classification (MRI)
# Contribute to OpenSource
## Repo: [GitHub](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms)
## Read Me: [Link](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms/tree/master#contributing-to-the-project)
# Abstract
A brain tumor is considered one of the most aggressive diseases among children and adults. Brain tumors account for 85 to 90 percent of all primary Central Nervous System (CNS) tumors. Every year, around 11,700 people are diagnosed with a brain tumor. The 5-year survival rate for people with a cancerous brain or CNS tumor is approximately 34 percent for men and 36 percent for women. Brain tumors are classified as benign tumors, malignant tumors, pituitary tumors, etc. Proper treatment, planning, and accurate diagnostics should be implemented to improve the life expectancy of the patients. The best technique to detect brain tumors is Magnetic Resonance Imaging (MRI). A huge amount of image data is generated through the scans, and these images are examined by the radiologist. A manual examination can be error-prone due to the level of complexity involved in brain tumors and their properties.
The application of automated classification techniques using Machine Learning (ML) and Artificial Intelligence (AI) has consistently shown higher accuracy than manual classification. Hence, proposing a system that performs detection and classification with Deep Learning algorithms such as Convolutional Neural Networks (CNN), Artificial Neural Networks (ANN), and Transfer Learning (TL) would be helpful to doctors all around the world.
### Context
Brain tumors are complex. There are a lot of abnormalities in the sizes and locations of brain tumor(s), which makes it really difficult to fully understand the nature of the tumor. Also, a professional neurosurgeon is required for MRI analysis. Oftentimes in developing countries, the lack of skillful doctors and of knowledge about tumors makes it really challenging and time-consuming to generate reports from MRIs. So an automated system on the cloud can solve this problem.
### Definition
To detect and classify brain tumors using CNN and TL, as an asset of Deep Learning, and to examine the tumor position (segmentation).
Kaggle dataset identifier: brain-tumor-classification-mri
<jupyter_script>import subprocess
whls = [
"/kaggle/input/pyg-cp37-pt111/torch_cluster-1.6.0-cp37-cp37m-linux_x86_64.whl",
"/kaggle/input/pyg-cp37-pt111/torch_scatter-2.1.0-cp37-cp37m-linux_x86_64.whl",
"/kaggle/input/pyg-cp37-pt111/torch_sparse-0.6.16-cp37-cp37m-linux_x86_64.whl",
"/kaggle/input/pyg-cp37-pt111/torch_spline_conv-1.2.1-cp37-cp37m-linux_x86_64.whl",
"/kaggle/input/pyg-cp37-pt111/torch_geometric-2.2.0-py3-none-any.whl",
"/kaggle/input/pyg-cp37-pt111/ruamel.yaml-0.17.21-py3-none-any.whl",
]
for w in whls:
print("Installing", w)
subprocess.call(["pip", "install", w, "--no-deps", "--upgrade"])
import torch
import torch.nn as nn
import torchvision
import torch.utils.data as data_utils
import torch_geometric
from types import MethodType
import matplotlib.pyplot as plt
import tqdm
from IPython import display
dtype = torch.float16
device = torch.device("cuda:0")
def train(model: nn.Module, num_epochs, train_loader, validate_loader, lr):
model.type(dtype)
model.to(device)
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr)
loss = nn.CrossEntropyLoss()
lrc = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=num_epochs, eta_min=lr / 10
)
loss_array = []
acc_array = []
    for epoch in tqdm.tqdm(range(num_epochs)):
        model.train()  # test() below switches the model to eval mode, so restore training mode each epoch
        running_loss, c = 0, 0
for inputs, target in train_loader:
inputs = inputs.type(dtype)
inputs = inputs.to(device)
target = target.to(device)
y_pre = model(inputs)
l = loss(y_pre, target)
optimizer.zero_grad()
l.backward()
optimizer.step()
            running_loss += l.item()  # accumulate a Python float so the autograd graph is not retained across batches
            c += 1
        lrc.step()  # advance the cosine-annealing learning-rate schedule once per epoch
        loss_array.append(running_loss / c)
acc_array.append(test(model, validate_loader))
display.clear_output(True)
plt.xlim([0, num_epochs - 1])
plt.plot(loss_array)
plt.plot(acc_array)
plt.show()
print(f"current loss{loss_array[-1]} current acc{acc_array[-1]}")
print(f"best acc: {max(acc_array)}")
@torch.no_grad()
def test(model, validate_loader):
correct = 0
total = 0
model.eval()
model.to(device)
model.type(dtype)
with torch.no_grad():
        for data in validate_loader:
            image, labels = data
            outputs = model(image.type(dtype).to(device))
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels.to(device)).sum().item()
acc = correct / total
return acc
transforms = torchvision.transforms
data_transform = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
),
"val": transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
),
}
def train_flowers(model, epochs, batch_size=32, lr=0.001):
train_dataset = torchvision.datasets.ImageFolder(
"/kaggle/input/flowerss/train", transform=data_transform["train"]
)
train_loader = data_utils.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
validate_dataset = torchvision.datasets.ImageFolder(
"/kaggle/input/flowerss/val", transform=data_transform["val"]
)
validate_loader = data_utils.DataLoader(
validate_dataset, batch_size=batch_size, shuffle=False
)
train(model, epochs, train_loader, validate_loader, lr)
test(model, validate_loader)
def train_tumor(model, epochs, batch_size=32, lr=0.001):
train_dataset = torchvision.datasets.ImageFolder(
"/kaggle/input/brain-tumor-classification-mri/Training",
transform=data_transform["train"],
)
train_loader = data_utils.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
validate_dataset = torchvision.datasets.ImageFolder(
"/kaggle/input/brain-tumor-classification-mri/Testing",
transform=data_transform["val"],
)
validate_loader = data_utils.DataLoader(
validate_dataset, batch_size=batch_size, shuffle=False
)
train(model, epochs, train_loader, validate_loader, lr)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResGCN2(nn.Module):
def __init__(self, k, num_classes):
super().__init__()
self.k = k
self.num_classes = num_classes
        self.resnet = torchvision.models.resnet18(pretrained=True)
self.resnet._forward_impl = MethodType(_forward_impl, self.resnet)
self.conv1 = torch_geometric.nn.GCNConv(512, 256)
self.dropout = nn.Dropout(0.5)
self.conv2 = torch_geometric.nn.GCNConv(256, num_classes)
def forward_features(self, X):
# B * 512 * 7 * 7
return self.resnet(X)
def convert_graph(self, x):
b, c, h, w = x.shape
device = x.device
k = self.k
x = torch.permute(x, (0, 2, 3, 1)).reshape((b, h * w, c))
y = torch.cdist(x, x, 2) # [b, hw, hw]
        _, idx = torch.topk(y, k, -1)  # NOTE: default largest=True keeps the k *farthest* nodes; pass largest=False for a true k-NN graph
source = torch.arange(h * w, device=device).repeat_interleave(k).repeat(b)
target = torch.flatten(idx)
step = torch.arange(b, device=device).repeat_interleave(h * w * k) * h * w
adj = torch.row_stack([source, target]) + step
return x.reshape((b * h * w, c)), adj.long()
    def forward_gcn(self, X, adj):
X = self.conv1(X, adj)
X = self.dropout(X)
X = self.conv2(X, adj) # (B*7*7, num_classes)
return X
def forward(self, X):
batch = X.shape[0]
X = self.forward_features(X)
X, adj = self.convert_graph(X)
        X = self.forward_gcn(X, adj)
X = torch.reshape(X, (batch, -1, self.num_classes))
X = torch.mean(X, 1) # (B, num_classes)
return X
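# Optional sanity check (a sketch added for illustration, not part of the original notebook): it
# replays the batched k-NN graph construction used in convert_graph on a dummy feature map, so the
# node and edge-index shapes can be verified without loading any data. The (2, 512, 7, 7) shape is
# an assumption matching the ResNet-18 feature map and the k used below.
import torch  # already imported above; repeated so the snippet is self-contained
b, c, h, w, k = 2, 512, 7, 7, 8
feat = torch.randn(b, c, h, w)
nodes = torch.permute(feat, (0, 2, 3, 1)).reshape((b, h * w, c))
dist = torch.cdist(nodes, nodes, 2)  # pairwise distances, shape [b, h*w, h*w]
_, idx = torch.topk(dist, k, -1)  # same selection rule as in the class above
source = torch.arange(h * w).repeat_interleave(k).repeat(b)
target = torch.flatten(idx)
step = torch.arange(b).repeat_interleave(h * w * k) * h * w
edge_index = torch.row_stack([source, target]) + step
print(nodes.reshape((b * h * w, c)).shape)  # torch.Size([98, 512]) node features
print(edge_index.shape)  # torch.Size([2, 784]) directed edges (b * h * w * k)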
model = ResGCN2(k=8, num_classes=5)
train_flowers(model, 20, batch_size=32, lr=0.001)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019183.ipynb
|
brain-tumor-classification-mri
|
sartajbhuvaji
|
[{"Id": 129019183, "ScriptId": 38352626, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12160881, "CreationDate": "05/10/2023 10:59:41", "VersionNumber": 1.0, "Title": "Fork of Resnet+GCN\u56fe\u7247\u5206\u7c7b", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 168.0, "LinesInsertedFromPrevious": 79.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 89.0, "LinesInsertedFromFork": 79.0, "LinesDeletedFromFork": 158.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 89.0, "TotalVotes": 0}]
|
[{"Id": 184705588, "KernelVersionId": 129019183, "SourceDatasetVersionId": 1183165}, {"Id": 184705590, "KernelVersionId": 129019183, "SourceDatasetVersionId": 5610519}, {"Id": 184705589, "KernelVersionId": 129019183, "SourceDatasetVersionId": 5226033}]
|
[{"Id": 1183165, "DatasetId": 672377, "DatasourceVersionId": 1214258, "CreatorUserId": 3469060, "LicenseName": "CC0: Public Domain", "CreationDate": "05/24/2020 16:24:55", "VersionNumber": 2.0, "Title": "Brain Tumor Classification (MRI)", "Slug": "brain-tumor-classification-mri", "Subtitle": "Classify MRI images into four classes", "Description": "# Contribute to OpenSource\n##Repo: [GitHub](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms)\n## Read Me: [Link](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms/tree/master#contributing-to-the-project)\n\n\n# Abstract\nA Brain tumor is considered as one of the aggressive diseases, among children and adults. Brain tumors account for 85 to 90 percent of all primary Central Nervous System(CNS) tumors. Every year, around 11,700 people are diagnosed with a brain tumor. The 5-year survival rate for people with a cancerous brain or CNS tumor is approximately 34 percent for men and36 percent for women. Brain Tumors are classified as: Benign Tumor, Malignant Tumor, Pituitary Tumor, etc. Proper treatment, planning, and accurate diagnostics should be implemented to improve the life expectancy of the patients. The best technique to detect brain tumors is Magnetic Resonance Imaging (MRI). A huge amount of image data is generated through the scans. These images are examined by the radiologist. A manual examination can be error-prone due to the level of complexities involved in brain tumors and their properties.\n\nApplication of automated classification techniques using Machine Learning(ML) and Artificial Intelligence(AI)has consistently shown higher accuracy than manual classification. Hence, proposing a system performing detection and classification by using Deep Learning Algorithms using ConvolutionNeural Network (CNN), Artificial Neural Network (ANN), and TransferLearning (TL) would be helpful to doctors all around the world.\n\n### Context\n\nBrain Tumors are complex. There are a lot of abnormalities in the sizes and location of the brain tumor(s). This makes it really difficult for complete understanding of the nature of the tumor. Also, a professional Neurosurgeon is required for MRI analysis. Often times in developing countries the lack of skillful doctors and lack of knowledge about tumors makes it really challenging and time-consuming to generate reports from MRI\u2019. So an automated system on Cloud can solve this problem.\n\n\n### Definition\n\nTo Detect and Classify Brain Tumor using, CNN and TL; as an asset of Deep Learning and to examine the tumor position(segmentation).\n\n\n### Acknowledgements for Dataset.\n\nNavoneel Chakrabarty\nSwati Kanchan\n\n### Team\n\nSartaj Bhuvaji\nAnkita Kadam\nPrajakta Bhumkar\nSameer Dedge", "VersionNotes": "Automatic Update 2020-05-24", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 672377, "CreatorUserId": 3469060, "OwnerUserId": 3469060.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1183165.0, "CurrentDatasourceVersionId": 1214258.0, "ForumId": 686859, "Type": 2, "CreationDate": "05/24/2020 16:22:54", "LastActivityDate": "05/24/2020", "TotalViews": 302511, "TotalDownloads": 32508, "TotalVotes": 481, "TotalKernels": 255}]
|
[{"Id": 3469060, "UserName": "sartajbhuvaji", "DisplayName": "Sartaj", "RegisterDate": "07/16/2019", "PerformanceTier": 0}]
|
||
129019305
|
<jupyter_start><jupyter_text>User_Data
The dataset consists of information about users who are potential customers for a product or service. It contains four input features - User ID, Gender, Age, and Estimated Salary - which are used to predict whether or not the user purchased the product, indicated by the output or target column 'Purchased'.
The User ID is a unique identifier assigned to each user, while Gender is the user's gender, which can be either male or female. Age is the age of the user in years, and Estimated Salary is an estimate of the user's annual salary.
The dataset is likely used for binary classification tasks to determine whether or not a user is likely to purchase a particular product or service. The features provided could potentially be used to create a model that predicts the probability of a user purchasing the product based on their age, gender, and estimated salary.
Kaggle dataset identifier: user-data
<jupyter_script># # Naive Bayes
# #### Python Implementation of the Naïve Bayes algorithm:
# - Now we will implement the Naive Bayes algorithm in Python. For this we will use the "user_data" dataset, which we have also used with our other classification models, so the Naive Bayes model can easily be compared with them.
# # Importing the libraries
#
import numpy as np
import matplotlib.pyplot as mtp
import pandas as pd
# Importing the dataset
dataset = pd.read_csv("/kaggle/input/user-data/User_Data.csv")
x = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
dataset.head()
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=0
)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(x_test)
pd.DataFrame(y_pred, y_test).head(20)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
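# As a quick numeric summary to go with the confusion matrix, a small additional check
# (a sketch; it only uses metrics already shipped with scikit-learn):
from sklearn.metrics import accuracy_score, classification_report
print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))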
# Visualising the Training set results
from matplotlib.colors import ListedColormap
x_set, y_set = x_train, y_train
X1, X2 = np.meshgrid(
np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01),
)
mtp.contourf(
X1,
X2,
classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha=0.75,
cmap=ListedColormap(("purple", "green")),
)
mtp.xlim(X1.min(), X1.max())
mtp.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
mtp.scatter(
x_set[y_set == j, 0],
x_set[y_set == j, 1],
c=ListedColormap(("purple", "green"))(i),
label=j,
)
mtp.title("Naive Bayes (Training set)")
mtp.xlabel("Age")
mtp.ylabel("Estimated Salary")
mtp.legend()
mtp.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
x_set, y_set = x_test, y_test
X1, X2 = np.meshgrid(
np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01),
)
mtp.contourf(
X1,
X2,
classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha=0.75,
cmap=ListedColormap(("purple", "green")),
)
mtp.xlim(X1.min(), X1.max())
mtp.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
mtp.scatter(
x_set[y_set == j, 0],
x_set[y_set == j, 1],
c=ListedColormap(("purple", "green"))(i),
label=j,
)
mtp.title("Naive Bayes (test set)")
mtp.xlabel("Age")
mtp.ylabel("Estimated Salary")
mtp.legend()
mtp.show()
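# A hedged example of scoring a brand-new observation: the age and salary below are made-up
# values, and the fitted StandardScaler must be applied before predicting.
new_user = sc.transform([[40, 60000]])  # hypothetical 40-year-old earning 60,000
print("Predicted class:", classifier.predict(new_user)[0])  # 1 = purchased, 0 = not purchased
print("Class probabilities:", classifier.predict_proba(new_user)[0])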
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019305.ipynb
|
user-data
|
sandragracenelson
|
[{"Id": 129019305, "ScriptId": 38352314, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8803259, "CreationDate": "05/10/2023 11:00:40", "VersionNumber": 1.0, "Title": "Naive Bayes", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 80.0, "LinesInsertedFromPrevious": 80.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184705819, "KernelVersionId": 129019305, "SourceDatasetVersionId": 3960552}]
|
[{"Id": 3960552, "DatasetId": 2350614, "DatasourceVersionId": 4016089, "CreatorUserId": 8893519, "LicenseName": "CC0: Public Domain", "CreationDate": "07/19/2022 10:12:10", "VersionNumber": 1.0, "Title": "User_Data", "Slug": "user-data", "Subtitle": "Data about product purchased or not", "Description": "The dataset consists of information about users who are potential customers for a product or service. It contains four input features - User ID, Gender, Age, and Estimated Salary - which are used to predict whether or not the user purchased the product, indicated by the output or target column 'Purchased'.\n\nThe User ID is a unique identifier assigned to each user, while Gender is the user's gender, which can be either male or female. Age is the age of the user in years, and Estimated Salary is an estimate of the user's annual salary.\n\nThe dataset is likely used for binary classification tasks to determine whether or not a user is likely to purchase a particular product or service. The features provided could potentially be used to create a model that predicts the probability of a user purchasing the product based on their age, gender, and estimated salary.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2350614, "CreatorUserId": 8893519, "OwnerUserId": 8893519.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3960552.0, "CurrentDatasourceVersionId": 4016089.0, "ForumId": 2377623, "Type": 2, "CreationDate": "07/19/2022 10:12:10", "LastActivityDate": "07/19/2022", "TotalViews": 5428, "TotalDownloads": 3239, "TotalVotes": 30, "TotalKernels": 11}]
|
[{"Id": 8893519, "UserName": "sandragracenelson", "DisplayName": "Sandra Grace Nelson", "RegisterDate": "11/15/2021", "PerformanceTier": 3}]
|
||
129524947
|
# # **Case Study**
# The case study used here is related to health, based on the Heart Disease Dataset.
# The dataset can be accessed through the following link:
# [Heart Disease Dataset](https://www.kaggle.com/datasets/johnsmith88/heart-disease-dataset/code?select=heart.csv)
# # **Workflow**
# **1) Business Understanding**
# Business Understanding is the process of understanding the goals and business environment to be faced. In this case study, the business refers to the health sector, primarily related to heart disease. The Business Understanding phase is carried out to identify business problems, metrics, and objectives.
# **2) Data Understanding**
# Data Understanding is a stage to understand the data contained in the dataset.
# **3) Data Preparation**
# Data Preparation is a working stage that is conducted to prepare data before further processing. The activities in the Data Preparation process include:
# * Checking for missing values
# * Checking for duplicate data
# * Checking for outliers
# * Checking for data imbalance
# **4) Descriptive Statistic Analysis**
# Descriptive statistics or descriptive statistical analysis is conducted to find and obtain an overview of data characteristics through statistical measurements.
# **5) Visualization**
# Visualization is the process of presenting data visually in the form of graphs or diagrams to make it easier to understand and analyze. Visualization can be done manually with EDA or automatically using Automated EDA.
# **6) Correlation Analysis and Feature Selection**
# Correlation analysis is performed to identify the relationship or correlation between variables or features in the dataset.
# **7) Feature Engineering**
# Feature engineering is the process of selecting and transforming features (or variables) in a dataset to improve the performance of a machine learning model. It involves identifying and extracting important features that are relevant and informative to the problem at hand, as well as creating new features from existing ones. The goal of feature engineering is to enhance the accuracy, efficiency, and interpretability of the model by providing it with more useful and relevant information to learn from.
# **8) Determine The Hypothesis**
# Formulating a hypothesis serves as a guide to test or evaluate an assumption or conjecture about the available data, which is the heart disease dataset in this case.
# **9) Choosing Model**
# Choosing a model to use for modeling the data.
# **10) Cross Validation and Bootstrapping**
# Cross validation and bootstrapping are conducted to evaluate the model to be used.
# **11) Building Model**
# Build the model.
# **12) Model Evaluation**
# The purpose of model evaluation is to assess the performance of a machine learning model and determine how well it is able to generalize to new, unseen data. Model evaluation helps to identify potential issues with the model, such as overfitting or underfitting, and provides insights into how the model can be improved. By evaluating the model's performance on a separate test set, it is possible to estimate how well the model will perform on new data and make more informed decisions about its deployment. Additionally, model evaluation can help to compare different models and select the best one for the given task or problem.
# # **1) Business Understanding**
# a. Business Problem
# Based on the Heart Disease Dataset that will be used, the dataset contains data related to patient demographics and clinical conditions that can be used to estimate the likelihood of the presence or absence of heart disease in patients, as well as which factors can influence the presence or absence of heart disease.
# b. Goals
# * Identifying the features that have the most influence on the presence or absence of heart disease in patients. This can be used to determine treatment and prevention strategies for patients.
# * Developing a predictive model that can classify patients as having or not having heart disease based on their clinical conditions.
# # **2) Data Understanding**
# In this case study, the dataset used is the Heart Disease Dataset obtained through Kaggle. The dataset contains data related to the clinical and demographic conditions of patients who have the potential to develop heart disease.
# There are 13 features or columns in the dataset:
# * **Age**
#
# Description: Patient's age in years
# Data Type: Continuous
# * **Sex**
#
# Description: Gender of the patient with a value of 1 = Male, 0 = Female
# Data Type: Categoric
# * **Chest Pain Type (cp)**
#
# Description: Type of patient's chest pain in a value of 0, 1, 2, 3
# Data Type: Categoric
# * **Resting Blood Pressure (trestbps)**
#
# Description: The patient's blood pressure at rest, in mmHg
# Data Type: Continuous
# * **Serum Cholesterol in mg/dl (chol)**
#
# Description: Total cholesterol in the patient's blood in units of mg/dl
# Data Type: Continuous
# * **Fasting Blood Sugar (fbs)**
#
# Description: The patient's blood sugar level is in the condition of fasting for at least 8 hours in units of mg/dl
# Data Type: Categoric
# * **Resting Electrocardiographic Results (restecg)**
#
# Description: The results of the patient's electrocardiogram performed at rest in values 0, 1, 2
# Data Type: Categoric
# * **Maximum Heart Rate Achieved (thalach)**
#
# Description: The highest value of the patient's heart rate during maximum physical activity
# Data Type: Continuous
# * **Exercise Induced Angina (exang)**
#
# Description: Chest pain that appears after doing physical activity with a value of 1 = there is chest pain, 0 = there is no chest pain
# Data Type: Categoric
# * **ST Depression Induced by Exercise Relative to Rest (oldpeak)**
#
# Description: ST segment depression values on the electrocardiogram during exercise compared to rest
# Data Type: Continuous
# * **The Slope of The Peak Exercise ST Segment (slope)**
#
# Description: Slope of the ST segment at the time of the peak exercise test
# Data Type: Categoric
# * **Number of Major Vessels Colored by Fluoroscopy (ca)**
#
# Description: The number of large blood vessels seen on the results of angiography in the value 0, 1, 2, 3
# Data Type: Categoric
# * **Thalassemia (thal)**
#
# Description: Whether or not thalassemia is present in the patient in a value of 0 = normal, 1 = fixed defect, 2 = reversible defect
# Data Type: Categoric
# * **Target**
# Description: Presence or absence of heart disease in the patient
# Data Type: Categoric
# # **3) Data Preparation**
# ## Import Library and Dataset
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Note: the cell below assumes a Colab runtime for the file upload; on Kaggle the CSV would
# instead be read directly from the attached dataset path with pd.read_csv.
from google.colab import files
uploaded = files.upload()
import io
df = pd.read_csv(io.BytesIO(uploaded["heart.csv"]))
df.shape
df.info()
df
# ## Checking for Missing Values
df.isnull().sum()
# ## Checking for Balanced Dataset
df["target"].value_counts()
sns.countplot(x="target", data=df)
# ## Handling Duplicate Data
duplicate_rows = df[df.duplicated()]
duplicate_sorted = duplicate_rows.sort_values(by=["age"])
print(duplicate_sorted)
df = df.drop_duplicates()
df.shape
# ## Handling Outliers
plt.figure(figsize=(17, 6))
sns.boxplot(data=df, orient="h")
# ### Outliers trestbps
plt.figure(figsize=(10, 3))
sns.boxplot(df["trestbps"])
q1 = df["trestbps"].quantile(0.25)
q3 = df["trestbps"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["trestbps"] > lower_limit) & (df["trestbps"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["trestbps"])
# ### Outliers chol
plt.figure(figsize=(10, 3))
sns.boxplot(df["chol"])
q1 = df["chol"].quantile(0.25)
q3 = df["chol"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["chol"] > lower_limit) & (df["chol"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["chol"])
# ### Outliers thalach
plt.figure(figsize=(10, 3))
sns.boxplot(df["thalach"])
q1 = df["thalach"].quantile(0.25)
q3 = df["thalach"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["thalach"] > lower_limit) & (df["thalach"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["thalach"])
# ### Outliers oldpeak
plt.figure(figsize=(10, 3))
sns.boxplot(df["oldpeak"])
q1 = df["oldpeak"].quantile(0.25)
q3 = df["oldpeak"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["oldpeak"] > lower_limit) & (df["oldpeak"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["oldpeak"])
# ### Rechecking Outliers
plt.figure(figsize=(15, 6))
sns.boxplot(data=df, orient="h")
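# The four outlier cells above apply the same 1.5*IQR rule column by column; the helper below is
# an equivalent reusable sketch (it filters sequentially, just like the cells above).
def remove_iqr_outliers(frame, columns, factor=1.5):
    for col in columns:
        q1, q3 = frame[col].quantile([0.25, 0.75])
        iqr = q3 - q1
        frame = frame[(frame[col] > q1 - factor * iqr) & (frame[col] < q3 + factor * iqr)]
    return frame
# Example usage (commented out so the cell-by-cell filtering above stays the single source of truth):
# df = remove_iqr_outliers(df, ["trestbps", "chol", "thalach", "oldpeak"])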
# # **4) Descriptive Statistic Analysis**
df.describe()
# # **5) Visualization**
from dataprep.eda import create_report
create_report(df).show()
# # **6) Correlation Analysis and Feature Selection**
# ## Performing Feature Selection
# ### Checking Correlation
corr = df.corr()
corr
print(corr["target"].sort_values(ascending=False))
X = df.drop("target", axis=1)
X
y = df["target"]
y
# ### Univariate Selection for Categorical Variable
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
bestfeatures = SelectKBest(score_func=chi2)
fit = bestfeatures.fit(X, y)
scores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns, scores], axis=1)
featureScores.columns = ["Label", "Score"]
featureScores.sort_values(by="Score", ascending=False)
# ### Dropping Features which are Not Correlated
df.drop(["fbs", "restecg"], axis=1, inplace=True)
df.head()
df.shape
# # **7) Feature Engineering**
# ### Feature Engineering
for i in df.columns:
print(i, df[i].unique())
print("\n")
new_sex = pd.get_dummies(data=df["sex"], prefix="sex")
new_sex
new_cp = pd.get_dummies(df["cp"], prefix="chestPain")
new_cp
new_exang = pd.get_dummies(df["exang"], prefix="exang")
new_exang
new_slope = pd.get_dummies(df["slope"], prefix="slope")
new_slope
new_thal = pd.get_dummies(df["thal"], prefix="thal")
new_thal
new_ca = pd.get_dummies(df["ca"], prefix="ca")
new_ca
app = [df, new_sex, new_cp, new_ca, new_thal, new_exang, new_slope]
df1 = pd.concat(app, axis=1)
df1.columns
df1.drop(["sex", "cp", "thal", "exang", "ca", "slope"], axis=1, inplace=True)
df1.head()
df1.shape
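# The column-by-column dummies above can also be produced in a single call; a sketch of the
# equivalent one-liner (the column order differs slightly, but the dummy column names are the same):
df1_alt = pd.get_dummies(
    df,
    columns=["sex", "cp", "ca", "thal", "exang", "slope"],
    prefix=["sex", "chestPain", "ca", "thal", "exang", "slope"],
)
df1_alt.shape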
# ### Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
df1[["age", "trestbps", "chol", "oldpeak", "thalach"]] = sc.fit_transform(
df1[["age", "trestbps", "chol", "oldpeak", "thalach"]]
)
df1.head(10)
df1.columns
X = df1.drop("target", axis=1)
X
y = df1["target"]
y
# # **8) Determine The Hypothesis**
# **Null Hypothesis**
# H0: There is no relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target')
# **Alternative Hypothesis**
# H1: There is a relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target')
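# One way to sanity-check this hypothesis for a single categorical feature is a chi-square test of
# independence; the sketch below is illustrative only and picks 'cp' (chest pain type) arbitrarily.
from scipy.stats import chi2_contingency
contingency = pd.crosstab(df["cp"], df["target"])
chi2_stat, p_value, dof, _ = chi2_contingency(contingency)
print("chi2 = %.2f, p-value = %.4f" % (chi2_stat, p_value))  # p < 0.05 -> reject H0 for this feature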
# # **9) Choosing Model**
# ## 1. Random Forest
# The Random Forest model will be used for modeling the Heart Disease dataset because this model is effective for dealing with multicollinearity in dataset variables. In addition, Random Forest is also suitable for use in disease prediction case studies and can provide an important feature for identifying risk factors that most influence the risk of heart disease in patients.
# ## 2. Logistic Regression
# The Logistic Regression model is used because this model can predict the probability of an event occurring based on a given variable, in this case study it is predicting the probability of the presence or absence of heart disease based on the patient's health factors. So by using the Logistic Regression model it is expected to be able to analyze the relationship between these variables with the risk of the presence or absence of heart disease.
# # **10) Cross Validation and Bootstrapping**
# ## Splitting Dataset to Data Test and Data Train
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# ## Cross Validation
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
# ### Cross Validation for Model using Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc_scores = cross_val_score(rfc, X_train, y_train, cv=5)
print(
"Accuracy with cross-validation: %.2f with standard deviation %.2f"
% (rfc_scores.mean(), rfc_scores.std())
)
# The cross-validation accuracy for the model using Random Forest is 0.82 (82%), which falls into the category of good predictive performance. Therefore, the process can proceed to building the model with Random Forest.
# ### Cross Validation for Model using Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr_scores = cross_val_score(lr, X_train, y_train, cv=5)
print(
"Accuracy with cross-validation: %.2f with standard deviation %.2f"
% (lr_scores.mean(), lr_scores.std())
)
# The cross-validation accuracy for the model using Logistic Regression is 0.83 (83%), which falls into the category of good predictive performance. Thus, the process can be continued to building the model with Logistic Regression.
# ## Bootstrapping
from sklearn.utils import resample
# ### Bootstrapping for Model using Random Forest
rfc = RandomForestClassifier()
n_boot_rfc = 100
accuracies = []
train_size = 0.8
for i in range(n_boot_rfc):
X_boot_rfc, y_boot_rfc = resample(
X_train, y_train, n_samples=int(train_size * len(X_train))
)
rfc.fit(X_boot_rfc, y_boot_rfc)
rfc_accuracy = rfc.score(X_test, y_test)
accuracies.append(rfc_accuracy)
rfc_mean_acc = np.mean(accuracies)
rfc_std_acc = np.std(accuracies)
rfc_lower_ci = rfc_mean_acc - 1.96 * rfc_std_acc
rfc_upper_ci = rfc_mean_acc + 1.96 * rfc_std_acc
print("Mean accuracy: %.2f" % rfc_mean_acc)
print("95%% confidence interval: [%.2f, %.2f]" % (rfc_lower_ci, rfc_upper_ci))
# The bootstrapping result for the model using Random Forest is 0.87 (87%), indicating that the model has good performance for predicting new data.
# ### Bootstrapping for Model using Logistic Regression
lr = LogisticRegression()
n_boot_lr = 100
accuracies = []
train_size = 0.8
for i in range(n_boot_lr):
X_boot_lr, y_boot_lr = resample(
X_train, y_train, n_samples=int(train_size * len(X_train))
)
lr.fit(X_boot_lr, y_boot_lr)
lr_accuracy = lr.score(X_test, y_test)
accuracies.append(lr_accuracy)
lr_mean_acc = np.mean(accuracies)
lr_std_acc = np.std(accuracies)
lr_lower_ci = lr_mean_acc - 1.96 * lr_std_acc
lr_upper_ci = lr_mean_acc + 1.96 * lr_std_acc
print("Mean accuracy: %.2f" % lr_mean_acc)
print("95%% confidence interval: [%.2f, %.2f]" % (lr_lower_ci, lr_upper_ci))
# The bootstrapping result for the model using Logistic Regression is 0.87 (87%), indicating that the model has good performance for predicting new data.
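# The two bootstrap loops above are identical except for the estimator; a reusable sketch of the
# same procedure (it assumes X_train, X_test, y_train, y_test from the split above):
def bootstrap_accuracy(estimator, n_boot=100, train_size=0.8):
    accs = []
    for _ in range(n_boot):
        X_boot, y_boot = resample(
            X_train, y_train, n_samples=int(train_size * len(X_train))
        )
        estimator.fit(X_boot, y_boot)
        accs.append(estimator.score(X_test, y_test))
    return np.mean(accs), np.std(accs)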
# # **11) Building Model**
# ## Random Forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
y_pred_rfc = rfc.predict(X_test)
pd.DataFrame(np.c_[y_test, y_pred_rfc], columns=["Actual", "Predicted"])
# ## Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression().fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)
pd.DataFrame(np.c_[y_test, y_pred_lr], columns=["Actual", "Predicted"])
# # **12) Model Evaluation**
# The evaluation methods used are:
# ## 1. Confusion Matrix
# Confusion matrix is suitable for use in the Heart Disease dataset because it can provide information regarding true positive, true negative, false positive, and false negative that are relevant to the classification case in the case study using the Heart Disease dataset.
# ## 2. Precision Recall
# Precision-recall is used because it can provide information about the trade-off between precision and recall which is important for evaluating classification models such as the Heart Disease dataset.
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
precision_score,
recall_score,
f1_score,
)
from sklearn.metrics import classification_report
from sklearn import metrics
# ## Model Evaluation for Model using Random Forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
rfc_train = rfc.score(X_train, y_train) * 100
rfc_test = rfc.score(X_test, y_test) * 100
print("Testing Accuracy:", round(rfc_test, 2), "%")
print("Training Accuracy:", round(rfc_train, 2), "%")
print(classification_report(y_test, y_pred_rfc))
data_rfc = confusion_matrix(y_test, y_pred_rfc)
rfc_con = pd.DataFrame(data_rfc, columns=np.unique(y_test), index=np.unique(y_test))
plt.figure(figsize=(5, 3))
sns.heatmap(rfc_con, annot=True, fmt="g", cmap="YlGnBu")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# Based on the results of the model evaluation above, it can be concluded that the Random Forest model has a good performance in classifying the dataset. It can be seen from the high value of precision, recall, and f1-score.
# ## Model Evaluation for Model using Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
lr_train = lr.score(X_train, y_train) * 100
lr_test = lr.score(X_test, y_test) * 100
print("Testing Accuracy:", round(lr_test, 2), "%")
print("Training Accuracy:", round(lr_train, 2), "%")
print(classification_report(y_test, y_pred_lr))
data_lr = confusion_matrix(y_test, y_pred_lr)
lr_con = pd.DataFrame(data_lr, columns=np.unique(y_test), index=np.unique(y_test))
plt.figure(figsize=(5, 3))
sns.heatmap(lr_con, annot=True, fmt="g", cmap="YlGnBu")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# Based on the results of the model evaluation above, it can be concluded that the Logistic Regression model has good performance in classifying the dataset. It can be seen from the high value of precision, recall, and f1-score.
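# In addition to the classification reports above, an explicit precision-recall curve can be drawn
# for both models; this sketch assumes the `rfc` and `lr` estimators fitted in the cells above.
from sklearn.metrics import precision_recall_curve, auc
for name, clf in [("Random Forest", rfc), ("Logistic Regression", lr)]:
    probs = clf.predict_proba(X_test)[:, 1]
    precision, recall, _ = precision_recall_curve(y_test, probs)
    plt.plot(recall, precision, label="%s (PR AUC = %.2f)" % (name, auc(recall, precision)))
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision-Recall Curve")
plt.legend()
plt.show()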
# ## Feature Importance
# ### Feature Importance for the Model Using Random Forest
rfc = RandomForestClassifier(n_estimators=100, random_state=42)
rfc.fit(X_train, y_train)
rfc_fimp = pd.Series(rfc.feature_importances_, index=X.columns)
rfc_fimp.sort_values(ascending=False)
plt.figure(figsize=(10, 6))
rfc_fimp.nsmallest(30).plot(kind="barh")  # ascending order, so the largest importances appear at the top
plt.title("Important Features", size=12)
plt.show()
# Features that have the most significant feature importance values in the models created using the Random Forest:
# * oldpeak
# * thalach
# * age
# * ca_0
# * chol
# Based on the feature importance results, the oldpeak variable has the greatest influence on the 'target' prediction (presence or absence of heart disease) in the Random Forest model, followed by thalach, age, ca_0, and chol, which also have a significant effect on the 'target' prediction.
# Thus, to make a model using the Random Forest method that is more effective for predicting the potential risk of heart disease, these variables must be considered and selected as important features.
# ### Feature Importance for the Model Using Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
coef = lr.coef_[0]
features = X.columns
feature_importance = pd.DataFrame({"Features": features, "Coef": coef})
feature_importance["Exp_Coef"] = np.exp(feature_importance["Coef"])
feature_importance = feature_importance.sort_values(by="Exp_Coef", ascending=False)
print(feature_importance)
fig, ax = plt.subplots(figsize=(8, 6))
ax.barh(feature_importance["Features"], feature_importance["Exp_Coef"])
ax.set_xlabel("Feature Importance (Exp Coef)")
ax.set_ylabel("Features")
ax.invert_yaxis()
plt.title("Important Features", size=12)
plt.show()
# Features that have the most significant feature importance values in the models created using the Logistic Regression:
# * ca_0
# * chestPain_3
# * sex_0
# * chestPain_2
# * ca_4
# Based on the feature importance results, the ca_0 variable has the greatest influence on the 'target' prediction in the Logistic Regression model, followed by chestPain_3, sex_0, chestPain_2, and ca_4, which also have a significant effect on the 'target' prediction.
# Thus, to make a model using the Logistic Regression method that is more effective for predicting the potential risk of heart disease, these variables must be considered and selected as important features.
# # **13) Model Comparison**
# a. Confusion Matrix Results
# The confusion matrix for the Random Forest model gives True Positive = 24, False Positive = 4, True Negative = 27, and False Negative = 1, while the Logistic Regression model gives True Positive = 24, False Positive = 6, True Negative = 25, and False Negative = 1.
# b. Classification Report Results
# The classification report gives an F1-score of 91% for the Random Forest model and 88% for the Logistic Regression model.
# c. Performance
# Random Forest is more accurate than Logistic Regression, as can be seen from its higher F1-score.
# d. Scalability
# Logistic Regression can handle very large datasets in relatively less time than Random Forest.
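# A compact tabulation of the comparison above (a sketch; it reuses the objects already defined in
# the evaluation cells, so the exact Random Forest numbers may vary slightly between runs).
comparison = pd.DataFrame(
    {
        "Test accuracy (%)": [rfc_test, lr_test],
        "F1-score": [f1_score(y_test, y_pred_rfc), f1_score(y_test, y_pred_lr)],
    },
    index=["Random Forest", "Logistic Regression"],
)
print(comparison.round(2))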
# # **14) Hyperparameter Tuning**
from sklearn.model_selection import GridSearchCV
# ## Hyperparameter Tuning for Random Forest
rfc = RandomForestClassifier()
rfc_param = [
{
"max_depth": np.arange(1, 10),
"min_samples_split": [0.1, 0.5, 1.0],
"random_state": np.arange(1, 50),
"n_estimators": [25, 50, 100],
}
]
rfc_search = GridSearchCV(rfc, rfc_param, scoring="accuracy")
rfc_result = rfc_search.fit(X_train, y_train)
print("Best Score: %s" % rfc_result.best_score_)
print("Best Hyperparameters: %s" % rfc_result.best_params_)
# Hyperparameter tuning for the model using Random Forest gives a best score of 0.86; the best parameters are a maximum tree depth (max_depth) of 9, a minimum sample split (min_samples_split) of 0.1, 50 estimators (n_estimators), and a random_state of 30.
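# The tuned model does not have to be rebuilt by hand: GridSearchCV (with the default refit=True)
# already exposes the refitted best model. A small sketch:
best_rfc = rfc_result.best_estimator_
print("Test accuracy of the tuned Random Forest: %.2f%%" % (best_rfc.score(X_test, y_test) * 100))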
# ## Hyperparameter Tuning for Logistic Regression
lr = LogisticRegression()
lr_param = [{"penalty": ["l2"], "C": [0.1, 0.4, 0.5], "random_state": [0]}]
lr_search = GridSearchCV(
lr,
lr_param,
scoring="accuracy",
n_jobs=-1,
)
lr_result = lr_search.fit(X_train, y_train)
print("Best Score: %s" % lr_result.best_score_)
print("Best Hyperparameters: %s" % lr_result.best_params_)
# Hyperparameter tuning for the model using Logistic Regression gives a best score of 0.84; the best parameters are an l2 penalty, a regularization strength (C) of 0.1, and a random_state of 0.
# # **15) Predict How Well Model Performance in Testing The Dataset**
# ### Random Forest
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
hrfc.fit(X_train, y_train)
y_pred_hrfc = hrfc.predict(X_test)
hrfc_accuracy = accuracy_score(y_test, y_pred_hrfc)
print("Accuracy on test dataset: %.2f%%" % (hrfc_accuracy * 100))
# The prediction accuracy of the Random Forest model after hyperparameter tuning is 89.29%.
# ## Logistic Regression
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
hlr.fit(X_train, y_train)
y_pred_hlr = hlr.predict(X_test)
hlr_accuracy = accuracy_score(y_test, y_pred_hlr)
print("Accuracy on test dataset: %.2f%%" % (hlr_accuracy * 100))
# The prediction accuracy of the Logistic Regression model after hyperparameter tuning is 87.5%.
# # **16) Learning Curve**
from sklearn.model_selection import learning_curve
def plot_learning_curve(
estimator, title, X, y, cv=None, n_jobs=None, train_sizes=np.linspace(0.1, 1.0, 5)
):
plt.figure()
plt.title(title)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(
estimator,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True,
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(
train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
plt.fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
plt.plot(
train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
plt.legend(loc="best")
return plt
# ### Learning Curve for the Random Forest Model
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
title = "Random Forest Learning Curve"
plot_learning_curve(hrfc, title, X_train, y_train, cv=5, n_jobs=-1)
# Based on the Learning Curve for the Random Forest Model above, it can be seen:
# 1. Based on the Training Score curve, initially the model has a perfect level of accuracy (1.0) for the training data when modeling small amounts of data, then this accuracy decreases when the amount of data to be modeled starts to increase. This shows that the model has the possibility of overfitting the training data when the amount of data is still small, but is able to balance the accuracy and complexity of the model when the amount of data increases.
# 2. Based on the Cross-Validation Score curve, it shows that initially the model has low accuracy (about 0.76) when modeling data that has never been seen before (out of sample) with a small amount. But then the accuracy starts to increase when the amount of data increases.
# 3. At the initial point, the two curves have a fairly large gap but the gap gets smaller as the amount of data increases. This shows that the model which initially has the problem of overfitting, is then able to start to balance the accuracy when the amount of data increases
# Thus, the model using Random Forest is good enough to handle data that is not too complex and not too simple, and is able to generalize well to data that has never been seen before.
# ### Learning Curve for the Logistic Regression Model
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
title = "Logistic Regression Learning Curve"
plot_learning_curve(hlr, title, X_train, y_train, cv=5, n_jobs=-1)
# Based on the Learning Curve for the Logistic Regression Model above, it can be seen:
# 1. Based on the Training Score curve, the training score value increases as the amount of training data increases, which means that the model is able to slowly increase its accuracy to the training data when the data provided begins to increase.
# 2. Based on the Cross-Validation Score curve, it shows that the model tends to be stable when modeling data that has never been seen before and continues to increase as the amount of data increases. This shows that the model can improve performance but it is not optimal, seen from the increase in the curve which tends to be small.
# 3. At the second point, the Cross-Validation Score curve point is higher than the Training Score curve point. This indicates that in these conditions there is overfitting in the training data.
# Thus, the model using Logistic Regression experiences a bit of overfitting at the second curve point.
# # **17) ROC Analysis**
from sklearn.metrics import roc_curve, roc_auc_score
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
hrfc.fit(X_train, y_train)
hlr.fit(X_train, y_train)
hrfc_prob = hrfc.predict_proba(X_test)[:, 1]
hlr_prob = hlr.predict_proba(X_test)[:, 1]
fpr_hrfc, tpr_hrfc, _ = roc_curve(y_test, hrfc_prob)
roc_auc_hrfc = roc_auc_score(y_test, hrfc_prob)
fpr_hlr, tpr_hlr, _ = roc_curve(y_test, hlr_prob)
roc_auc_hlr = roc_auc_score(y_test, hlr_prob)
plt.figure(figsize=(8, 6))
plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc)
plt.plot(fpr_hlr, tpr_hlr, label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
# The ROC curves of the two models built here, Random Forest and Logistic Regression, show that the Logistic Regression model has an AUC of 0.96, which is larger than the AUC of the Random Forest model; based on these AUC values, both models can be categorized as very good at distinguishing the positive and negative classes. The Logistic Regression curve also sits closer to the top-left corner (coordinates 0.0, 1.0), and the closer a model's curve is to the top-left corner, the better the model. Therefore, based on the ROC curves above, the Logistic Regression model is better than the Random Forest model.
# ### ROC Curve for the Random Forest Model
plt.figure(figsize=(8, 6))
plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
# ### ROC Curve for the Logistic Regression Model
plt.figure(figsize=(8, 6))
plt.plot(
fpr_hlr,
tpr_hlr,
label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr,
color="orange",
)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524947.ipynb
| null | null |
[{"Id": 129524947, "ScriptId": 38514066, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14357379, "CreationDate": "05/14/2023 14:27:57", "VersionNumber": 1.0, "Title": "Heart Disease Prediction Modeling", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 868.0, "LinesInsertedFromPrevious": 868.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
# # **Study Case**
# The case study that will be used is a related to Health using the Heart Disease Dataset.
# The dataset can be accessed through the following link:
# [Heart Disease Dataset](https://www.kaggle.com/datasets/johnsmith88/heart-disease-dataset/code?select=heart.csv)
# # **Workflow**
# **1) Business Understanding**
# Business Understanding is the process of understanding the goals and business environment to be faced. In this case study, the business refers to the health sector, primarily related to heart disease. The Business Understanding phase is carried out to identify business problems, metrics, and objectives.
# **2) Data Understanding**
# Data Understanding is a stage to understand the data contained in the dataset.
# **3) Data Preparation**
# Data Preparation is a working stage that is conducted to prepare data before further processing. The activities in the Data Preparation process include:
# * Checking for missing values
# * Checking for duplicate data
# * Checking for outliers
# * Checking for data imbalance
# **4) Descriptive Statistic Analysis**
# Descriptive statistics or descriptive statistical analysis is conducted to find and obtain an overview of data characteristics through statistical measurements.
# **5) Visualization**
# Visualization is process to present data visually in the form of graphs or diagrams to make it easier to understand and analyze. Visualization can be done manually with EDA or automatically using Automated EDA.
# **6) Correlation Analysis and Feature Selection**
# Correlation analysis is performed to identify the relationship or correlation between variables or features in the dataset.
# **7) Feature Engineering**
# Feature engineering is the process of selecting and transforming features (or variables) in a dataset to improve the performance of a machine learning model. It involves identifying and extracting important features that are relevant and informative to the problem at hand, as well as creating new features from existing ones. The goal of feature engineering is to enhance the accuracy, efficiency, and interpretability of the model by providing it with more useful and relevant information to learn from.
# **8) Determine The Hypothesis**
# Formulating a hypothesis serves as a guide to test or evaluate an assumption or conjecture about the available data, which is the heart disease dataset in this case.
# **9) Choosing Model**
# Choosing a model to use for modeling the data.
# **10) Cross Validation and Bootsrapping**
# Cross validation and bootsrapping conducted to evaluate the model to be used.
# **11) Building Model**
# Build the model.
# **12) Model Evaluation**
# The purpose of model evaluation is to assess the performance of a machine learning model and determine how well it is able to generalize to new, unseen data. Model evaluation helps to identify potential issues with the model, such as overfitting or underfitting, and provides insights into how the model can be improved. By evaluating the model's performance on a separate test set, it is possible to estimate how well the model will perform on new data and make more informed decisions about its deployment. Additionally, model evaluation can help to compare different models and select the best one for the given task or problem.
# # **1) Business Understanding**
# a. Business Problem
# Based on the Heart Disease Dataset that will be used, the dataset contains data related to patient demographics and clinical conditions that can be used to estimate the likelihood of the presence or absence of heart disease in patients, as well as which factors can influence the presence or absence of heart disease.
# b. Goals
# * Identifying the features that have the most influence on the presence or absence of heart disease in patients. This can be used to determine treatment and prevention strategies for patients.
# * Developing a predictive model that can classify patients as having or not having heart disease based on their clinical conditions.
# # **2) Data Understanding**
# In this case study, the dataset used is the Heart Disease Dataset obtained through Kaggle. The dataset contains data related to the clinical and demographic conditions of patients who have the potential to develop heart disease.
# The dataset has 14 columns: 13 predictor features plus the target:
# * **Age**
#
# Description: Patient's age in years
# Data Type: Continuous
# * **Sex**
#
# Description: Gender of the patient with a value of 1 = Male, 0 = Female
# Data Type: Categoric
# * **Chest Pain Type (cp)**
#
# Description: Type of patient's chest pain in a value of 0, 1, 2, 3
# Data Type: Categoric
# * **Resting Blood Pressure (trestbps)**
#
# Description: The patient's blood pressure at rest in mmHG
# Data Type: Continuous
# * **Serum Cholesterol in mg/dl (chol)**
#
# Description: Total cholesterol in the patient's blood in units of mg/dl
# Data Type: Continuous
# * **Fasting Blood Sugar (fbs)**
#
# Description: The patient's blood sugar level is in the condition of fasting for at least 8 hours in units of mg/dl
# Data Type: Categoric
# * **Resting Electrocardiographic Results (restecg)**
#
# Description: The results of the patient's electrocardiogram performed at rest in values 0, 1, 2
# Data Type: Categoric
# * **Maximum Heart Rate Achieved (thalach)**
#
# Description: The highest value of the patient's heart rate during maximum physical activity
# Data Type: Continuous
# * **Exercise Induced Angina (exang)**
#
# Description: Chest pain that appears after doing physical activity with a value of 1 = there is chest pain, 0 = there is no chest pain
# Data Type: Categoric
# * **ST Depression Induced by Exercise Relative to Rest (oldpeak)**
#
# Description: ST segment depression values on the electrocardiogram during exercise compared to rest
# Data Type: Continuous
# * **The Slope of The Peak Exercise ST Segment (slope)**
#
# Description: Slope of the ST segment at the time of the peak exercise test
# Data Type: Categoric
# * **Number of Major Vessels Colored by Flourosopy (ca)**
#
# Description: The number of large blood vessels seen on the results of angiography in the value 0, 1, 2, 3
# Data Type: Categoric
# * **Thalasemia (thal)**
#
# Description: Whether or not thalassemia is present in the patient in a value of 0 = normal, 1 = fixed defect, 2 = reversible defect
# Data Type: Categoric
# * **Target**
# Description: Presence or absence of heart disease in the patient
# Data Type: Categoric
# # **3) Data Preparation**
# ## Import Library and Dataset
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from google.colab import files
uploaded = files.upload()
import io
df = pd.read_csv(io.BytesIO(uploaded["heart.csv"]))
df.shape
df.info()
df
# ## Checking for Missing Values
df.isnull().sum()
# ## Checking for Balanced Dataset
df["target"].value_counts()
sns.countplot(x="target", data=df)
# ## Handling Duplicate Data
duplicate_rows = df[df.duplicated()]
duplicate_sorted = duplicate_rows.sort_values(by=["age"])
print(duplicate_sorted)
df = df.drop_duplicates()
df.shape
# ## Handling Outliers
plt.figure(figsize=(17, 6))
sns.boxplot(data=df, orient="h")
# ### Outliers trestbps
plt.figure(figsize=(10, 3))
sns.boxplot(df["trestbps"])
q1 = df["trestbps"].quantile(0.25)
q3 = df["trestbps"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["trestbps"] > lower_limit) & (df["trestbps"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["trestbps"])
# ### Outliers chol
plt.figure(figsize=(10, 3))
sns.boxplot(df["chol"])
q1 = df["chol"].quantile(0.25)
q3 = df["chol"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["chol"] > lower_limit) & (df["chol"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["chol"])
# ### Outliers thalach
plt.figure(figsize=(10, 3))
sns.boxplot(df["thalach"])
q1 = df["thalach"].quantile(0.25)
q3 = df["thalach"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["thalach"] > lower_limit) & (df["thalach"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["thalach"])
# ### Outliers oldpeak
plt.figure(figsize=(10, 3))
sns.boxplot(df["oldpeak"])
q1 = df["oldpeak"].quantile(0.25)
q3 = df["oldpeak"].quantile(0.75)
IQR = q3 - q1
lower_limit = q1 - 1.5 * IQR
upper_limit = q3 + 1.5 * IQR
df = df[(df["oldpeak"] > lower_limit) & (df["oldpeak"] < upper_limit)]
plt.figure(figsize=(10, 3))
sns.boxplot(df["oldpeak"])
# ### Rechecking Outliers
plt.figure(figsize=(15, 6))
sns.boxplot(data=df, orient="h")
# # **4) Descriptive Statistic Analysis**
df.describe()
# # **5) Visualization**
from dataprep.eda import create_report
create_report(df).show()
# # **6) Correlation Analysis and Feature Selection**
# ## Performing Feature Selection
# ### Checking Correlation
corr = df.corr()
corr
print(corr["target"].sort_values(ascending=False))
X = df.drop("target", axis=1)
X
y = df["target"]
y
# ### Univariate Selection for Categorical Variable
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
bestfeatures = SelectKBest(score_func=chi2)
fit = bestfeatures.fit(X, y)
scores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns, scores], axis=1)
featureScores.columns = ["Label", "Score"]
featureScores.sort_values(by="Score", ascending=False)
# ### Dropping Features which are Not Correlated
df.drop(["fbs", "restecg"], axis=1, inplace=True)
df.head()
df.shape
# # **7) Feature Engineering**
# ### Feature Engineering
for i in df.columns:
print(i, df[i].unique())
print("\n")
new_sex = pd.get_dummies(data=df["sex"], prefix="sex")
new_sex
new_cp = pd.get_dummies(df["cp"], prefix="chestPain")
new_cp
new_exang = pd.get_dummies(df["exang"], prefix="exang")
new_exang
new_slope = pd.get_dummies(df["slope"], prefix="slope")
new_slope
new_thal = pd.get_dummies(df["thal"], prefix="thal")
new_thal
new_ca = pd.get_dummies(df["ca"], prefix="ca")
new_ca
app = [df, new_sex, new_cp, new_ca, new_thal, new_exang, new_slope]
df1 = pd.concat(app, axis=1)
df1.columns
df1.drop(["sex", "cp", "thal", "exang", "ca", "slope"], axis=1, inplace=True)
df1.head()
df1.shape
# ### Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
df1[["age", "trestbps", "chol", "oldpeak", "thalach"]] = sc.fit_transform(
df1[["age", "trestbps", "chol", "oldpeak", "thalach"]]
)
df1.head(10)
df1.columns
X = df1.drop("target", axis=1)
X
y = df1["target"]
y
# # **8) Determine The Hypothesis**
# **Null Hypothesis**
# H0: There is no relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target')
# **Alternative Hypothesis**
# H1: There is a relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target')
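# One simple way to put these hypotheses to a formal test is a per-feature significance test.
# The sketch below is not part of the original workflow; it assumes scipy is available and uses
# a chi-square test of independence for a categorical feature and a t-test for a continuous one.
from scipy.stats import chi2_contingency, ttest_ind

chi2_stat, p_sex, _, _ = chi2_contingency(pd.crosstab(df["sex"], df["target"]))
t_stat, p_age = ttest_ind(df.loc[df["target"] == 1, "age"], df.loc[df["target"] == 0, "age"])
print("sex vs target: chi-square p-value = %.4f" % p_sex)
print("age vs target: t-test p-value = %.4f" % p_age)
# Small p-values would lead us to reject H0 for that feature in favour of H1.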
# # **9) Choosing Model**
# ## 1. Random Forest
# The Random Forest model will be used for modeling the Heart Disease dataset because this model is effective for dealing with multicollinearity in dataset variables. In addition, Random Forest is also suitable for use in disease prediction case studies and can provide an important feature for identifying risk factors that most influence the risk of heart disease in patients.
# ## 2. Logistic Regression
# The Logistic Regression model is used because this model can predict the probability of an event occurring based on a given variable, in this case study it is predicting the probability of the presence or absence of heart disease based on the patient's health factors. So by using the Logistic Regression model it is expected to be able to analyze the relationship between these variables with the risk of the presence or absence of heart disease.
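# For intuition, logistic regression models P(target = 1 | x) as the sigmoid of a linear score.
# A minimal illustration with made-up coefficients (purely hypothetical, not fitted to this data):
z = 0.5 + 1.2 * 1 - 0.8 * 0.3  # hypothetical intercept plus two weighted feature values
p = 1 / (1 + np.exp(-z))
print("illustrative predicted probability: %.2f" % p)  # about 0.81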
# # **10) Cross Validation and Bootstrapping**
# ## Splitting Dataset to Data Test and Data Train
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# ## Cross Validation
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
# ### Cross Validation for Model using Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc_scores = cross_val_score(rfc, X_train, y_train, cv=5)
print(
"Accuracy with cross-validation: %.2f with standard deviation %.2f"
% (rfc_scores.mean(), rfc_scores.std())
)
# The Cross Validation result for the model using Random Forest is 0.82 or 82%, which falls into the category of good predictive performance. Therefore, the process can proceed to building a model with Random Forest.
# ### Cross Validation for Model using Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr_scores = cross_val_score(lr, X_train, y_train, cv=5)
print(
"Accuracy with cross-validation: %.2f with standard deviation %.2f"
% (lr_scores.mean(), lr_scores.std())
)
# The Cross Validation result for the model using Logistic Regression is 0.83 or 83%, which also falls into the category of good predictive performance. Thus, the process can be continued to build a model with Logistic Regression.
# ## Bootstrapping
from sklearn.utils import resample
# ### Bootstrapping for Model using Random Forest
rfc = RandomForestClassifier()
n_boot_rfc = 100
accuracies = []
train_size = 0.8
for i in range(n_boot_rfc):
X_boot_rfc, y_boot_rfc = resample(
X_train, y_train, n_samples=int(train_size * len(X_train))
)
rfc.fit(X_boot_rfc, y_boot_rfc)
rfc_accuracy = rfc.score(X_test, y_test)
accuracies.append(rfc_accuracy)
rfc_mean_acc = np.mean(accuracies)
rfc_std_acc = np.std(accuracies)
rfc_lower_ci = rfc_mean_acc - 1.96 * rfc_std_acc
rfc_upper_ci = rfc_mean_acc + 1.96 * rfc_std_acc
print("Mean accuracy: %.2f" % rfc_mean_acc)
print("95%% confidence interval: [%.2f, %.2f]" % (rfc_lower_ci, rfc_upper_ci))
# Bootstrapping results for models using Random Forest are 0.87 or 87%, indicating that the model has good performance for predicting new data.
# ### Bootstrapping for Model using Logistic Regression
lr = LogisticRegression()
n_boot_lr = 100
accuracies = []
train_size = 0.8
for i in range(n_boot_lr):
X_boot_lr, y_boot_lr = resample(
X_train, y_train, n_samples=int(train_size * len(X_train))
)
lr.fit(X_boot_lr, y_boot_lr)
lr_accuracy = lr.score(X_test, y_test)
accuracies.append(lr_accuracy)
lr_mean_acc = np.mean(accuracies)
lr_std_acc = np.std(accuracies)
lr_lower_ci = lr_mean_acc - 1.96 * lr_std_acc
lr_upper_ci = lr_mean_acc + 1.96 * lr_std_acc
print("Mean accuracy: %.2f" % lr_mean_acc)
print("95%% confidence interval: [%.2f, %.2f]" % (lr_lower_ci, lr_upper_ci))
# Bootstrapping results for models using Logistic Regression have a value of 0.87 or 87%, indicating that the model has good performance for predicting new data.
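# The intervals above assume the bootstrap accuracies are roughly normal (mean ± 1.96·std).
# A distribution-free alternative is the percentile interval over the same resamples — a small
# sketch, not part of the original analysis (here `accuracies` holds the Logistic Regression runs):
lr_perc_ci = np.percentile(accuracies, [2.5, 97.5])
print("95%% percentile interval: [%.2f, %.2f]" % (lr_perc_ci[0], lr_perc_ci[1]))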
# # **11) Building Model**
# ## Random Forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
y_pred_rfc = rfc.predict(X_test)
pd.DataFrame(np.c_[y_test, y_pred_rfc], columns=["Actual", "Predicted"])
# ## Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression().fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)
pd.DataFrame(np.c_[y_test, y_pred_lr], columns=["Actual", "Predicted"])
# # **12) Model Evaluation**
# The evaluation model used is:
# ## 1. Confusion Matrix
# Confusion matrix is suitable for use in the Heart Disease dataset because it can provide information regarding true positive, true negative, false positive, and false negative that are relevant to the classification case in the case study using the Heart Disease dataset.
# ## 2. Precision Recall
# Precision-recall is used because it can provide information about the trade-off between precision and recall which is important for evaluating classification models such as the Heart Disease dataset.
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
precision_score,
recall_score,
f1_score,
)
from sklearn.metrics import classification_report
from sklearn import metrics
# ## Model Evaluation for Model using Random Forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
rfc_train = rfc.score(X_train, y_train) * 100
rfc_test = rfc.score(X_test, y_test) * 100
print("Testing Accuracy:", round(rfc_test, 2), "%")
print("Training Accuracy:", round(rfc_train, 2), "%")
print(classification_report(y_test, y_pred_rfc))
data_rfc = confusion_matrix(y_test, y_pred_rfc)
rfc_con = pd.DataFrame(data_rfc, columns=np.unique(y_test), index=np.unique(y_test))
plt.figure(figsize=(5, 3))
sns.heatmap(rfc_con, annot=True, fmt="g", cmap="YlGnBu")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# Based on the results of the model evaluation above, it can be concluded that the Random Forest model has a good performance in classifying the dataset. It can be seen from the high value of precision, recall, and f1-score.
# ## Model Evaluation for Model using Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
lr_train = lr.score(X_train, y_train) * 100
lr_test = lr.score(X_test, y_test) * 100
print("Testing Accuracy:", round(lr_test, 2), "%")
print("Training Accuracy:", round(lr_train, 2), "%")
print(classification_report(y_test, y_pred_lr))
data_lr = confusion_matrix(y_test, y_pred_lr)
lr_con = pd.DataFrame(data_lr, columns=np.unique(y_test), index=np.unique(y_test))
plt.figure(figsize=(5, 3))
sns.heatmap(lr_con, annot=True, fmt="g", cmap="YlGnBu")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# Based on the results of the model evaluation above, it can be concluded that the Logistic Regression model has good performance in classifying the dataset. It can be seen from the high value of precision, recall, and f1-score.
# ## Feature Importance
# ### Feature Importance for the Model Using Random Forest
rfc = RandomForestClassifier(n_estimators=100, random_state=42)
rfc.fit(X_train, y_train)
rfc_fimp = pd.Series(rfc.feature_importances_, index=X.columns)
rfc_fimp.sort_values(ascending=False)
plt.figure(figsize=(10, 6))
rfc_fimp.nsmallest(30).plot(kind="barh")
plt.title("Important Features", size=12)
plt.show()
# Feature importances expressed as percentages
round(rfc_fimp, 4) * 100
# Features that have the most significant feature importance values in the models created using the Random Forest:
# * oldpeak
# * thalach
# * age
# * ca_0
# * chol
# Based on the results of the feature importance, it can be seen that the oldpeak variable has the greatest influence on the 'target' prediction results (presence or absence of heart disease) in the Random Forest model. Then the variables thalach, age, ca_0, and chol, have a significant effect on the 'target' prediction results.
# Thus, to build a more effective Random Forest model for predicting the potential risk of heart disease, these variables should be considered and selected as important features.
# ### Feature Importance for the Model Using Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
coef = lr.coef_[0]
features = X.columns
feature_importance = pd.DataFrame({"Features": features, "Coef": coef})
feature_importance["Exp_Coef"] = np.exp(feature_importance["Coef"])
feature_importance = feature_importance.sort_values(by="Exp_Coef", ascending=False)
print(feature_importance)
fig, ax = plt.subplots(figsize=(8, 6))
ax.barh(feature_importance["Features"], feature_importance["Exp_Coef"])
ax.set_xlabel("Feature Importance (Exp Coef)")
ax.set_ylabel("Features")
ax.invert_yaxis()
plt.title("Important Features", size=12)
plt.show()
# Features that have the most significant feature importance values in the models created using the Logistic Regression:
# * ca_0
# * chestPain_3
# * sex_0
# * chestPain_2
# * ca_4
# Based on the feature importance results, the ca_0 variable has the greatest influence on the 'target' prediction in the Logistic Regression model, followed by chestPain_3, sex_0, chestPain_2, and ca_4, which also have a significant effect on the 'target' prediction.
# Thus, to build a more effective Logistic Regression model for predicting the potential risk of heart disease, these variables should be considered and selected as important features.
# # **13) Model Comparison**
# a. Confusion Matrix Results
# The confusion matrix for the Random Forest model gives True Positive = 24, False Positive = 4, True Negative = 27, and False Negative = 1. For the Logistic Regression model it gives True Positive = 24, False Positive = 6, True Negative = 25, and False Negative = 1 (a quick arithmetic check of these counts follows right after this list).
# b. Classification Report results
# Classification Report on the Random Forest model obtained an F1-Score value of 91% while on the Logistic Regression model obtained an F1-Score value of 88%
# c. Performance
# Random Forest is more accurate than Logistic Regression, as shown by its higher F1-Score.
# d. Scalability
# Logistic Regression can handle very large datasets in relatively less time than Random Forest.
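# A quick arithmetic check of the figures quoted above, using only the stated confusion-matrix
# counts (a sketch; precision = TP/(TP+FP), recall = TP/(TP+FN), F1 = 2PR/(P+R)):
for name, tp, fp, tn, fn in [("Random Forest", 24, 4, 27, 1), ("Logistic Regression", 24, 6, 25, 1)]:
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    print("%s: precision %.2f, recall %.2f, F1 %.2f" % (name, precision, recall, f1))
# This gives roughly 0.91 for Random Forest and 0.87 for Logistic Regression, in line with the reported 91% and 88%.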
# # **14) Hyperparameter Tuning**
from sklearn.model_selection import GridSearchCV
# ## Hyperparameter Tuning for Random Forest
rfc = RandomForestClassifier()
rfc_param = [
{
"max_depth": np.arange(1, 10),
"min_samples_split": [0.1, 0.5, 1.0],
"random_state": np.arange(1, 50),
"n_estimators": [25, 50, 100],
}
]
rfc_search = GridSearchCV(rfc, rfc_param, scoring="accuracy")
rfc_result = rfc_search.fit(X_train, y_train)
print("Best Score: %s" % rfc_result.best_score_)
print("Best Hyperparameters: %s" % rfc_result.best_params_)
# The results of Hyperparameter Tuning for models using Random Forest are that the best score is 0.86, then the best parameters used in making models with Random Forest are the maximum tree depth (max depth) is 9, the minimum number of sample splits (minimum sample split) is 0.1, the number of estimators (n estimators) is 50, and the random state is 30.
# ## Hyperparameter Tuning for Logistic Regression
lr = LogisticRegression()
lr_param = [{"penalty": ["l2"], "C": [0.1, 0.4, 0.5], "random_state": [0]}]
lr_search = GridSearchCV(
lr,
lr_param,
scoring="accuracy",
n_jobs=-1,
)
lr_result = lr_search.fit(X_train, y_train)
print("Best Score: %s" % lr_result.best_score_)
print("Best Hyperparameters: %s" % lr_result.best_params_)
# The results of Hyperparameter Tuning for models using Logistic Regression are that the best score is 0.84, then the best parameters used in making models with Logistic Regression are the type of regularization used (penalty) is l2, the regularization strength (C) is 0.1, and the random state is 0.
# # **15) Predict How Well Model Performance in Testing The Dataset**
# ### Random Forest
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
hrfc.fit(X_train, y_train)
y_pred_hrfc = hrfc.predict(X_test)
hrfc_accuracy = accuracy_score(y_test, y_pred_hrfc)
print("Accuracy on test dataset: %.2f%%" % (hrfc_accuracy * 100))
# The test accuracy of the Random Forest model after hyperparameter tuning is 89.29%.
# ## Logistic Regression
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
hlr.fit(X_train, y_train)
y_pred_hlr = hlr.predict(X_test)
hlr_accuracy = accuracy_score(y_test, y_pred_hlr)
print("Accuracy on test dataset: %.2f%%" % (hlr_accuracy * 100))
# The test accuracy of the Logistic Regression model after hyperparameter tuning is 87.5%.
# # **16) Learning Curve**
from sklearn.model_selection import learning_curve
def plot_learning_curve(
estimator, title, X, y, cv=None, n_jobs=None, train_sizes=np.linspace(0.1, 1.0, 5)
):
plt.figure()
plt.title(title)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(
estimator,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True,
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(
train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
plt.fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
plt.plot(
train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
plt.legend(loc="best")
return plt
# ### Learning Curve Model Random Forest
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
title = "Random Forest Learning Curve"
plot_learning_curve(hrfc, title, X_train, y_train, cv=5, n_jobs=-1)
# Based on the Learning Curve for the Random Forest Model above, it can be seen:
# 1. Based on the Training Score curve, initially the model has a perfect level of accuracy (1.0) for the training data when modeling small amounts of data, then this accuracy decreases when the amount of data to be modeled starts to increase. This shows that the model has the possibility of overfitting the training data when the amount of data is still small, but is able to balance the accuracy and complexity of the model when the amount of data increases.
# 2. Based on the Cross-Validation Score curve, it shows that initially the model has low accuracy (about 0.76) when modeling data that has never been seen before (out of sample) with a small amount. But then the accuracy starts to increase when the amount of data increases.
# 3. At the initial point, the two curves have a fairly large gap but the gap gets smaller as the amount of data increases. This shows that the model which initially has the problem of overfitting, is then able to start to balance the accuracy when the amount of data increases
# Thus, the model using Random Forest is good enough to handle data that is not too complex and not too simple, and is able to generalize well to data that has never been seen before.
# ### Learning Curve Model Logistic Regression
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
title = "Logistic Regression Learning Curve"
plot_learning_curve(hlr, title, X_train, y_train, cv=5, n_jobs=-1)
# Based on the Learning Curve for the Logistic Regression Model above, it can be seen:
# 1. Based on the Training Score curve, the training score value increases as the amount of training data increases, which means that the model is able to slowly increase its accuracy to the training data when the data provided begins to increase.
# 2. Based on the Cross-Validation Score curve, it shows that the model tends to be stable when modeling data that has never been seen before and continues to increase as the amount of data increases. This shows that the model can improve performance but it is not optimal, seen from the increase in the curve which tends to be small.
# 3. At the second point, the Cross-Validation Score is slightly higher than the Training Score. An overfit model would score higher on its training data, not lower, so this is better explained by sampling noise in the folds than by overfitting.
# Thus, the Logistic Regression model shows no clear sign of overfitting; its learning curve is stable but improves only slowly as more data is added.
# # **17) ROC Analysis**
from sklearn.metrics import roc_curve, roc_auc_score
hrfc = RandomForestClassifier(
max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30
)
hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0)
hrfc.fit(X_train, y_train)
hlr.fit(X_train, y_train)
hrfc_prob = hrfc.predict_proba(X_test)[:, 1]
hlr_prob = hlr.predict_proba(X_test)[:, 1]
fpr_hrfc, tpr_hrfc, _ = roc_curve(y_test, hrfc_prob)
roc_auc_hrfc = roc_auc_score(y_test, hrfc_prob)
fpr_hlr, tpr_hlr, _ = roc_curve(y_test, hlr_prob)
roc_auc_hlr = roc_auc_score(y_test, hlr_prob)
plt.figure(figsize=(8, 6))
plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc)
plt.plot(fpr_hlr, tpr_hlr, label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
# The ROC curves of the two models, Random Forest and Logistic Regression, show that the Logistic Regression model has an AUC of 0.96, higher than the AUC of the Random Forest model; based on these AUC values both models can be categorized as very good at separating the positive and negative classes. The Logistic Regression curve also sits closer to the top-left corner (coordinate 0.0, 1.0), and the closer a model's curve is to that corner, the better the model. Therefore, judging from the ROC curves above, the Logistic Regression model performs better than the Random Forest model.
# ### ROC Model Random Forest
plt.figure(figsize=(8, 6))
plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
# ### ROC Model Logistic Regression
plt.figure(figsize=(8, 6))
plt.plot(
fpr_hlr,
tpr_hlr,
label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr,
color="orange",
)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
| false | 0 | 9,260 | 1 | 9,260 | 9,260 |
||
129524810
|
<jupyter_start><jupyter_text>Financial Inclusion in Africa
You are asked to predict the likelihood of the person having a bank account or not (Yes = 1, No = 0), for each unique id in the test dataset . You will train your model on 70% of the data and test your model on the final 30% of the data, across four East African countries - Kenya, Rwanda, Tanzania, and Uganda.
Kaggle dataset identifier: financial-inclusion-in-africa
<jupyter_script>import pandas as pd
train = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Train.csv")
train.head()
test = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Test.csv")
test.head()
train.nunique()
train.info()
train = train.drop(["uniqueid"], axis=1)
train.head()
test = test.drop(["uniqueid"], axis=1)
test.head()
train["country"].value_counts()
train["bank_account"] = train["bank_account"].replace({"Yes": 1, "No": 0})
train.head()
train["cellphone_access"] = train["cellphone_access"].replace({"Yes": 1, "No": 0})
train.head()
test["cellphone_access"] = test["cellphone_access"].replace({"Yes": 1, "No": 0})
test.head()
train["relationship_with_head"].value_counts()
train["marital_status"].value_counts()
train["education_level"].value_counts()
train["job_type"].value_counts()
round(train["bank_account"].value_counts() * 100 / len(train), 2)
from lazypredict.Supervised import LazyClassifier
y = train.pop("bank_account")
X = train
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y
)
clf = LazyClassifier(verbose=0, predictions=True)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
models
predictions.head()
from sklearn.metrics import classification_report
for i in predictions.columns.tolist():
print("\t\t", i, "\n")
print(classification_report(y_test, predictions[i]), "\n")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524810.ipynb
|
financial-inclusion-in-africa
|
gauravduttakiit
|
[{"Id": 129524810, "ScriptId": 38513196, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4760409, "CreationDate": "05/14/2023 14:26:40", "VersionNumber": 1.0, "Title": "Bank Account Prediction : LazyPredict", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 185670275, "KernelVersionId": 129524810, "SourceDatasetVersionId": 5683202}]
|
[{"Id": 5683202, "DatasetId": 3267290, "DatasourceVersionId": 5758770, "CreatorUserId": 4760409, "LicenseName": "Unknown", "CreationDate": "05/14/2023 13:56:48", "VersionNumber": 1.0, "Title": "Financial Inclusion in Africa", "Slug": "financial-inclusion-in-africa", "Subtitle": NaN, "Description": "You are asked to predict the likelihood of the person having a bank account or not (Yes = 1, No = 0), for each unique id in the test dataset . You will train your model on 70% of the data and test your model on the final 30% of the data, across four East African countries - Kenya, Rwanda, Tanzania, and Uganda.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3267290, "CreatorUserId": 4760409, "OwnerUserId": 4760409.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5683202.0, "CurrentDatasourceVersionId": 5758770.0, "ForumId": 3332906, "Type": 2, "CreationDate": "05/14/2023 13:56:48", "LastActivityDate": "05/14/2023", "TotalViews": 108, "TotalDownloads": 6, "TotalVotes": 1, "TotalKernels": 2}]
|
[{"Id": 4760409, "UserName": "gauravduttakiit", "DisplayName": "Gaurav Dutta", "RegisterDate": "03/28/2020", "PerformanceTier": 3}]
|
import pandas as pd
train = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Train.csv")
train.head()
test = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Test.csv")
test.head()
train.nunique()
train.info()
train = train.drop(["uniqueid"], axis=1)
train.head()
test = test.drop(["uniqueid"], axis=1)
test.head()
train["country"].value_counts()
train["bank_account"] = train["bank_account"].replace({"Yes": 1, "No": 0})
train.head()
train["cellphone_access"] = train["cellphone_access"].replace({"Yes": 1, "No": 0})
train.head()
test["cellphone_access"] = test["cellphone_access"].replace({"Yes": 1, "No": 0})
test.head()
train["relationship_with_head"].value_counts()
train["marital_status"].value_counts()
train["education_level"].value_counts()
train["job_type"].value_counts()
round(train["bank_account"].value_counts() * 100 / len(train), 2)
from lazypredict.Supervised import LazyClassifier
y = train.pop("bank_account")
X = train
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y
)
clf = LazyClassifier(verbose=0, predictions=True)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
models
predictions.head()
from sklearn.metrics import classification_report
for i in predictions.columns.tolist():
print("\t\t", i, "\n")
print(classification_report(y_test, predictions[i]), "\n")
| false | 2 | 494 | 5 | 613 | 494 |
||
129524735
|
<jupyter_start><jupyter_text>Most famous video card manufacturers' share prices
# Share prices of the top 5 GPU companies:
**NVIDIA** (1999-2023 share prices)

**AMD** (1980-2023 share prices)

**Intel** (1980-2023 share prices)

**ASUS** (2000-2023 share prices)

**MSI** (1962-2023 share prices)

Kaggle dataset identifier: nvidia-amd-intel-asus-msi-share-prices
<jupyter_script># ### Models
# - ARIMA(2,1,3)
# - Additive-model seasonal decomposition
# - LSTM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.tsa.stattools import adfuller
from numpy import log
from statsmodels.tsa.stattools import kpss
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.stattools import pacf
from keras.models import Sequential
from keras.layers.core import Dense, Activation
import keras
import math
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error
from numpy import array
from keras.models import Sequential
from keras.layers import LSTM, SimpleRNN
from keras.layers import Dense
from statsmodels.tsa.stattools import pacf
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
import warnings
warnings.filterwarnings("ignore")
# ## Data
# - Pick one brand: NVIDIA
# - Univariate: forecast Close
# Data description: daily NVIDIA share price and market data from 1999-01-25 to 2023-04-12, 6094 rows in total. The Close price is chosen as the forecasting target.
dataset = pd.read_csv(
"/kaggle/input/nvidia-amd-intel-asus-msi-share-prices/NVIDIA (1999-2023).csv"
)
dataset.head()
# dataset.tail()
dataset.describe()
# ### Exploratory data analysis
# No missing values
dataset.isnull().sum()
# #### Time series plot
start_date = pd.to_datetime(dataset.Date[0])
end_date = pd.to_datetime(dataset.Date.values[-1])
dataset["Date"] = pd.to_datetime(dataset["Date"])
top_plt = plt.subplot2grid((5, 4), (0, 0), rowspan=3, colspan=4)
top_plt.plot(dataset.Date, dataset["Close"])
plt.title("Historical Stock Close Price of Nvidia")
# bottom_plt = plt.subplot2grid((5,4), (3,0), rowspan = 1, colspan = 4)
# bottom_plt.bar(dataset.Date, dataset['Volume'])
# plt.title('\nNvidia Trading Volume', y = -0.60)
plt.gcf().set_size_inches(16, 10)
# The neural-network results later on are good, possibly because Volume is kept here
dataset2 = dataset[["Close", "Volume", "Date"]]
dataset2["logclose"] = np.log(dataset2["Close"])
dataset2.head()
dataset2.isnull().sum()
# ## ARIMA
# 1. Difference to remove the trend; take the log to handle heteroscedasticity
# 2. Choose the orders from the ACF and PACF
# 3. Use auto ARIMA to choose the orders
# ### Auto ARIMA
# Select the orders automatically first, then interpret them together with the ACF, PACF and the AIC
# ARIMA(2,1,3)
import pmdarima as pm
smodel = pm.auto_arima(
dataset2.logclose,
start_p=1,
max_p=6,
start_q=1,
max_q=6,
    seasonal=True,  # with seasonal=True this becomes a SARIMA search
    stationary=False,
    test="adf",  # choose the differencing order automatically via the ADF test
information_criterion="aic",
stepwise=False,
error_action="ignore",
suppress_warnings=True,
)
smodel.summary()
smodel.plot_diagnostics()
plt.gcf().set_size_inches(16, 10)
plt.show()
# #### Differencing
# Take the log, then a first difference
# Need to check that the result is stationary and not pure white noise
np.min(dataset2.logclose)
plt.rcParams.update({"figure.figsize": (18, 7), "figure.dpi": 120})
# Original Series
fig, axes = plt.subplots(3, 2, sharex=True)
axes[0, 0].plot(dataset2["Close"].values)
axes[0, 0].set_title("Original Series")
# plot_acf(dataset2['Close'].values, ax = axes[0, 1]);axes[0, 1].set_title('ACF of Original Series')
axes[0, 1].plot(dataset2["logclose"].values)
axes[0, 1].set_title("Original Series(log)")
# plot_acf(dataset2['logclose'].values, ax = axes[0, 3]);axes[0, 3].set_title('ACF of Original Series(log)')
# 1st Differencing
axes[1, 0].plot(dataset2["Close"].diff())
axes[1, 0].set_title("1st Order Differencing")
# plot_acf(dataset2['Close'].diff().dropna(), ax = axes[1, 1]);axes[1, 1].set_title('ACF of 1st Order Differencing')
axes[1, 1].plot(dataset2["logclose"].diff())
axes[1, 1].set_title("1st Order Differencing(log)")
# plot_acf(dataset2['logclose'].diff(), ax = axes[1, 3]);axes[1, 3].set_title('ACF of 1st Order Differencing(log)')
# 2nd Differencing
axes[2, 0].plot(dataset2["Close"].diff().diff(periods=8))
axes[2, 0].set_title("2nd Order Differencing")
# plot_acf(dataset2['Close'].diff().diff().dropna(), ax = axes[2, 1]);axes[2, 1].set_title('ACF of 2nd Order Differencing')
axes[2, 1].plot(dataset2["logclose"].diff().diff(periods=8))
axes[2, 1].set_title("2nd Order Differencing(log)")
# plot_acf(dataset2['logclose'].diff().diff(), ax = axes[2, 3]);axes[2, 3].set_title('ACF of 2nd Order Differencing(log)')
plt.show()
# #### Stationarity tests
# - ADF: null rejected, so the differenced series is stationary
# - Breusch-Pagan: null rejected, so some heteroscedasticity remains. Handling the outliers might let it pass?
result = adfuller(dataset2.logclose.diff()[1:], autolag="AIC")
print(f"ADF Statistic: {result[0]}")
print(f"p-value: {result[1]}")
for key, value in result[4].items():
print("Critial Values:")
print(f" {key}, {value}")
import statsmodels.stats.api as sms
test = sms.het_breuschpagan(
np.array(dataset2.logclose.diff()[1:]).reshape(-1, 1),
np.array(range(len(dataset2.logclose.diff()[1:]))).reshape(-1, 1),
)
test[-1]
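# kpss is imported at the top of the notebook but never used; a complementary check (a sketch).
# KPSS has the opposite null hypothesis (stationarity), so failing to reject here supports the ADF result.
kpss_stat, kpss_p, _, _ = kpss(dataset2.logclose.diff()[1:], regression="c", nlags="auto")
print("KPSS statistic: %.3f, p-value: %.3f" % (kpss_stat, kpss_p))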
# #### White-noise (pure randomness) test
# Rejection at any lag is enough
from statsmodels.stats.diagnostic import acorr_ljungbox
ljungbox_result = acorr_ljungbox(
dataset2.logclose.diff().diff(periods=8)[9:], lags=12
)  # returns the test statistics and p-values; lags is the number of lags tested
ljungbox_result
# #### ACF,PACF
plt.rcParams.update({"figure.figsize": (20, 5), "figure.dpi": 120})
fig, axes = plt.subplots(1, 2, sharex=True)
plot_acf(dataset2.logclose.diff()[1:], ax=axes[0])
axes[0].set_title("ACF of Original Series(log)")
plot_pacf(dataset2.logclose.diff()[1:], ax=axes[1])
axes[1].set_title("PACF of 1st Order Differencing(log)")
plt.show()
# ## Models considered :
# **ARIMA(2,1,3) **
#
# **Additive model**
#
# **LSTM **
# ## ARIMA
data = dataset2["logclose"].values
print("Length of Total data: ", len(data))
train_length = int(len(data) * 0.9)
train_data = data[:train_length]
test_data = data[train_length:]
print("Train and Test data length: ", len(train_data), len(test_data))
# ### Building ARIMA Model
import statsmodels.api as sm
model = sm.tsa.arima.ARIMA(train_data, order=(2, 1, 3))
model_fit = model.fit()
print(model_fit.summary())
# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1, 2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind="kde", title="Density", ax=ax[1])
plt.show()
# Forecast
forecast_result = model_fit.forecast(steps=50, alpha=0.05) # 95% conf
forecast_result[:50]
test_data[:50]
# ### Plotting Test and Predicted Results
# Forecast the next 30 days
pred_uc = model_fit.get_forecast(steps=30, alpha=0.05)
pred_pr = pred_uc.predicted_mean
# Get the confidence interval of the forecast
pred_ci = pred_uc.conf_int()
# Combine the forecast with its confidence interval
pred_data = pd.DataFrame(
{"forecast": pred_pr, "lower_ci_95": pred_ci[:, 0], "upper_ci_95": pred_ci[:, 1]}
)
pred_data.head()
fig, ax = plt.subplots(figsize=(15, 6))
ax.plot(pred_data.index[:30] - 30, train_data[-30:])
ax.plot(pred_data.forecast[:30], color="green", label="forecast", alpha=0.7)
ax.plot(
pred_data.index[:30], test_data[:30], color="yellow", label="observed", alpha=0.7
)
ax.fill_between(
pred_data.index,
pred_data.lower_ci_95,
pred_data.upper_ci_95,
color="grey",
alpha=0.5,
label="95% confidence interval",
)
ax.set_title("ARIMA Model for Nvidia Price Forecasting")
ax.legend()
plt.show()
# Here we're plotting Test and Predicted data
plt.figure(figsize=(12, 6))
plt.rcParams.update({"font.size": 12})
plt.plot(test_data[:50], "#0077be", label="Actual")
plt.plot(forecast_result[:], "#ff8841", label="Predicted")
plt.title("ARIMA Model for Nvidia Price Forecasting")
plt.ylabel("Nvidia Price [in Dollar]")
plt.xlabel("Time Steps [in Days] ")
plt.legend()
plt.show()
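# The forecast above is only inspected visually; a quick numeric check on the same 50 steps
# (a sketch using the sklearn metrics imported at the top; values are on the log scale):
arima_rmse = math.sqrt(mean_squared_error(test_data[:50], forecast_result[:50]))
arima_mae = mae(test_data[:50], forecast_result[:50])
print("ARIMA 50-step RMSE (log scale): %.3f, MAE (log scale): %.3f" % (arima_rmse, arima_mae))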
# ## Additive model
# Three decompositions with periods 7, 30 and 365. None of them shows a clear seasonal pattern (a numeric check follows the three decomposition plots below).
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=7,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=30,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=365,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
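# One way to back the "no clear seasonal pattern" reading with a number is Hyndman's
# seasonal-strength measure F_s = max(0, 1 - Var(remainder) / Var(seasonal + remainder));
# values near 0 mean the seasonal component explains almost nothing. A sketch (not in the original):
for p in [7, 30, 365]:
    dec = seasonal_decompose(dataset2.logclose, period=p)
    resid = dec.resid.dropna()
    seas = dec.seasonal[resid.index]
    strength = max(0.0, 1 - resid.var() / (seas + resid).var())
    print("period %d: seasonal strength %.3f" % (p, strength))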
help(seasonal_decompose)
# ## Artificial Neural Network
data = dataset2["Close"].values
print("Shape of data: ", data.shape)
# Separating train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", train_data.shape, test_data.shape)
# Changing dataset shape to 2D
train_data = train_data.reshape(-1, 1)
test_data = test_data.reshape(-1, 1)
print("Shape of Train and Test data: ", train_data.shape, test_data.shape)
def create_dataset(dataset, lookback):
dataX, dataY = [], []
for i in range(len(dataset) - lookback - 1):
a = dataset[i : (i + lookback), 0]
dataX.append(a)
b = dataset[i + lookback, 0]
dataY.append(b)
return np.array(dataX), np.array(dataY)
# ### Selecting Lag value from PACF graph
plot_pacf(data, lags=10)
plt.show()
# ### Considering only Auto-correlation Lag value Greater than 10%
pacf_value = pacf(data, nlags=20)
lag = 0
# collect lag values greater than 10% correlation
for x in pacf_value:
if x > 0.1:
lag += 1
else:
break
print("Selected look_back (or lag = ): ", lag)
# ### Separating Input and Output values
train_X, train_y = create_dataset(train_data, lag)
test_X, test_y = create_dataset(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building an MLP model
np.random.seed(7)
# model = Sequential()
# model.add(Dense(64, input_dim = lag, activation='relu', name= "1st_hidden"))
# # model.add(Dense(64, activation='relu', name = '2nd_hidden'))
# model.add(Dense(1, name = 'Output_layer', activation = 'linear'))
# # model.add(Activation("linear", name = 'Linear_activation'))
# model.compile(loss = "mean_squared_error", optimizer = "adam")
# model.summary()
# ### Fitting data to Model
# epoch_number = 100
# batches = 64
# history = model.fit(train_X, train_y, epochs = epoch_number, batch_size = batches, verbose = 1, shuffle = False,
# validation_split = 0.1)
# ### Train and Validation Loss
# # plot history
# plt.clf
# plt.figure(figsize = (10,8))
# plt.plot(history.history['loss'], label = 'train')
# plt.plot(history.history['val_loss'], label = 'test')
# plt.xlabel('Number of Epochs')
# plt.ylabel('Train and Test Loss')
# plt.title('Train and Test loss per epochs [Univariate]')
# plt.legend()
# plt.show()
# ### Making Predictions
# # Make prediction
# testPredict = model.predict(test_X)
# predicted_value = testPredict[:, 0]
# ### Evaluation Metrics for measuring performance
# * ** R-Squared **
# * ** Mean Absolute Error **
# * ** Mean Absolute Percentage Error**
# * ** Mean Squared Error**
# * ** Root Mean Squared Error**
# * ** Normalized Root Mean Squared Error**
# * ** Weighted Absolute Percentage Error**
# * ** Weighted Mean Absolute Percentage Error**
def evaluate_forecast_results(actual, predicted):
print("R2 Score: ", round(r2_score(actual, predicted), 2))
print("MAE : ", round(mae(actual, predicted), 2))
print("MSE: ", round(mean_squared_error(actual, predicted), 2))
print("RMSE: ", round(math.sqrt(mean_squared_error(actual, predicted)), 2))
print("NRMSE: ", NRMSE(actual, predicted))
print("WMAPE: ", WMAPE(actual, predicted))
def NRMSE(actual, predicted):
rmse = math.sqrt(mean_squared_error(actual, predicted))
nrmse = rmse / np.mean(actual)
return round(nrmse, 4)
def WMAPE(actual, predicted):
    # use absolute errors, otherwise positive and negative errors cancel out
    abs_error = np.sum(np.abs(actual - predicted))
    wmape = abs_error / np.sum(actual)
    return round(wmape, 4)
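# MAPE is listed among the metrics above but never implemented; a minimal sketch
# (undefined when an actual value is zero, which does not happen for this price series):
def MAPE(actual, predicted):
    actual, predicted = np.array(actual), np.array(predicted)
    return round(np.mean(np.abs((actual - predicted) / actual)) * 100, 2)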
# evaluate_forecast_results(test_y, predicted_value)  # commented out: the MLP prediction above is commented out, so predicted_value is undefined
# plt.figure(figsize = (16, 8))
# plt.rcParams.update({'font.size': 12})
# plt.plot(test_y[:], '#0077be', label = 'Actual')
# plt.plot(predicted_value, '#ff8841', label = 'Predicted')
# plt.title('MLP Model for Nvidia Price Forecasting')
# plt.ylabel('Nvidia Stock Close Price ')
# plt.xlabel('Time Steps [in Days] ')
# plt.legend()
# plt.show()
# ## RNN - Univariate Time Series Forecasting
#
# data = dataset2['Close'].values
# print('Shape of data: ', data.shape)
# Separate train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", len(train_data), len(test_data))
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
end_ix = i + n_steps
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# ### Lag Value already to be chosen from PACF Plot
pacf_value = pacf(data, nlags=20)
lag = 0
# collect lag values greater than 10% correlation
for x in pacf_value:
if x > 0.1:
lag += 1
else:
break
print("Selected look_back (or lag = ): ", lag)
n_features = 1
train_X, train_y = split_sequence(train_data, lag)
test_X, test_y = split_sequence(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Reshaping train_X and test_X to 3-D
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features))
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features))
# New shape of train_X and test_X are :-
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building the model
# # define model
# model = Sequential()
# model.add(SimpleRNN(64, activation='relu', return_sequences = False, input_shape = (lag, n_features)))
# model.add(Dense(1))
# model.compile(optimizer = 'adam', loss = 'mse')
# model.summary()
# ### Fit the model - with training data
import tensorflow as tf
tf.config.run_functions_eagerly(True)
# fit model
# cb = tf.keras.callbacks.EarlyStopping(monitor = 'loss', patience = 15, restore_best_weights = True)
# history = model.fit(train_X, train_y, epochs = 150, batch_size = 64, verbose = 1, validation_split = 0.1,
# callbacks = [cb])
# ### Summarizing model accuracy and Loss
# # summarize history for loss
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc = 'upper left')
# plt.show()
# ### Making prediction with Test data
# train_predict = model.predict(train_X)
# test_predict = model.predict(test_X)
# print('Shape of train and test predict: ', train_predict.shape, test_predict.shape)
# ### Model evaluation
# actual_ = test_y
# predicted_ = test_predict[:, 0]
# len(actual_), len(predicted_)
# evaluate_forecast_results(actual_, predicted_)
# ### Plotting test and predicted data
#
# plt.rc("figure", figsize = (14,8))
# plt.rcParams.update({'font.size': 16})
# plt.plot(actual_, label = 'Actual')
# plt.plot(predicted_, label = 'Predicted')
# plt.xlabel('Time in days')
# plt.ylabel('Nvidia stock price')
# plt.title('Nvidia Stock Close price prediction using Simple RNN - Test data')
# plt.legend()
# plt.show()
# df_train = pd.DataFrame(columns = ['Train data'])
# df_train['Train data'] = train_data
# df = pd.DataFrame(columns = ['Test data', 'Predicted data'])
# df['Test data'] = actual_
# df['Predicted data'] = predicted_
# total_len = len(df_train['Train data']) + len(df['Test data'])
# range(len(df_train['Train data']), total_len)
# x_list = [x for x in range(len(df_train['Train data']), total_len)]
# df.index = x_list
# plt.rc("figure", figsize=(14,8))
# plt.rcParams.update({'font.size': 16})
# plt.xlabel('Time in days')
# plt.ylabel('Nvidia price')
# plt.title('Nvidia price prediction using Simple RNN')
# plt.plot(df_train['Train data'])
# plt.plot(df[['Test data', 'Predicted data']])
# plt.legend(['Train', 'Test', 'Predictions'], loc='lower right')
# plt.show()
# ## LSTM
data = dataset2["Close"].values
print("Shape of data: ", data.shape)
# Separate train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", len(train_data), len(test_data))
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
end_ix = i + n_steps
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# ### Choosing the appropriate lag value
lag = 2
n_features = 1
train_X, train_y = split_sequence(train_data, lag)
test_X, test_y = split_sequence(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Reshaping train_X and test_X to 3D
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features))
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features))
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building LSTM Model
model = Sequential()
model.add(
LSTM(64, activation="relu", return_sequences=True, input_shape=(lag, n_features))
)
model.add(LSTM(64, activation="relu"))
model.add(Dense(1))
model.compile(optimizer="adam", loss="mse")
model.summary()
# ### Fitting model with data
import tensorflow as tf
tf.config.run_functions_eagerly(True)
cb = tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=15, restore_best_weights=True
)
history = model.fit(
train_X,
train_y,
epochs=150,
batch_size=64,
verbose=1,
validation_split=0.1,
callbacks=[cb],
)
# ### Summarizing model accuracy and Loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# ### Making the prediction
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print("Shape of train and test predict: ", train_predict.shape, test_predict.shape)
# ### Model Evaluation
actual_lstm = test_y
predicted_lstm = test_predict[:, 0]
evaluate_forecast_results(actual_lstm, predicted_lstm)
df_train = pd.DataFrame(columns=["Train data"])
df_train["Train data"] = train_data
df = pd.DataFrame(columns=["Test data", "Predicted data"])
df["Test data"] = actual_lstm
df["Predicted data"] = predicted_lstm
total_len = len(df_train["Train data"]) + len(df["Test data"])
range(len(df_train["Train data"]), total_len)
x_list = [x for x in range(len(df_train["Train data"]), total_len)]
df.index = x_list
plt.rc("figure", figsize=(14, 8))
plt.rcParams.update({"font.size": 16})
plt.xlabel("Time in days")
plt.ylabel("Nvidia Stock Close price")
plt.title("Nvidia Stock price prediction using LSTM")
plt.plot(df_train["Train data"])
plt.plot(df[["Test data", "Predicted data"]])
plt.legend(["Train", "Test", "Predictions"], loc="lower right")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524735.ipynb
|
nvidia-amd-intel-asus-msi-share-prices
|
kapturovalexander
|
[{"Id": 129524735, "ScriptId": 37699502, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10493971, "CreationDate": "05/14/2023 14:26:12", "VersionNumber": 1.0, "Title": "GPU Stock Price Prediction \u81ea\u7528", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 662.0, "LinesInsertedFromPrevious": 285.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 377.0, "LinesInsertedFromFork": 285.0, "LinesDeletedFromFork": 229.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 377.0, "TotalVotes": 1}]
|
[{"Id": 185670143, "KernelVersionId": 129524735, "SourceDatasetVersionId": 5634621}]
|
[{"Id": 5634621, "DatasetId": 3126518, "DatasourceVersionId": 5709896, "CreatorUserId": 10074224, "LicenseName": "Other (specified in description)", "CreationDate": "05/08/2023 15:34:07", "VersionNumber": 7.0, "Title": "Most famous video card manufacturers' share prices", "Slug": "nvidia-amd-intel-asus-msi-share-prices", "Subtitle": "Share prices of 5 biggest companies who make GPU", "Description": "# Share prices of the top 5 GPU companies:\n**NVIDIA** (1999-2023 share prices)\n\n**AMD** (1980-2023 share prices)\n\n**Intel** (1980-2023 share prices)\n\n**ASUS** (2000-2023 share prices)\n\n**MSI** (1962-2023 share prices)\n", "VersionNotes": "Data Update 2023-05-08", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3126518, "CreatorUserId": 10074224, "OwnerUserId": 10074224.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6218583.0, "CurrentDatasourceVersionId": 6297954.0, "ForumId": 3190101, "Type": 2, "CreationDate": "04/13/2023 12:15:18", "LastActivityDate": "04/13/2023", "TotalViews": 9305, "TotalDownloads": 1486, "TotalVotes": 69, "TotalKernels": 7}]
|
[{"Id": 10074224, "UserName": "kapturovalexander", "DisplayName": "Alexander Kapturov", "RegisterDate": "03/28/2022", "PerformanceTier": 2}]
|
# ### Models
# - ARIMA(2,1,3)
# - Additive-model seasonal decomposition
# - LSTM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.tsa.stattools import adfuller
from numpy import log
from statsmodels.tsa.stattools import kpss
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.stattools import pacf
from keras.models import Sequential
from keras.layers.core import Dense, Activation
import keras
import math
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error
from numpy import array
from keras.models import Sequential
from keras.layers import LSTM, SimpleRNN
from keras.layers import Dense
from statsmodels.tsa.stattools import pacf
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
import warnings
warnings.filterwarnings("ignore")
# ## Data
# - Pick one brand: NVIDIA
# - Univariate: forecast Close
# Data description: daily NVIDIA share price and market data from 1999-01-25 to 2023-04-12, 6094 rows in total. The Close price is chosen as the forecasting target.
dataset = pd.read_csv(
"/kaggle/input/nvidia-amd-intel-asus-msi-share-prices/NVIDIA (1999-2023).csv"
)
dataset.head()
# dataset.tail()
dataset.describe()
# ### Exploratory data analysis
# No missing values
dataset.isnull().sum()
# #### Time series plot
start_date = pd.to_datetime(dataset.Date[0])
end_date = pd.to_datetime(dataset.Date.values[-1])
dataset["Date"] = pd.to_datetime(dataset["Date"])
top_plt = plt.subplot2grid((5, 4), (0, 0), rowspan=3, colspan=4)
top_plt.plot(dataset.Date, dataset["Close"])
plt.title("Historical Stock Close Price of Nvidia")
# bottom_plt = plt.subplot2grid((5,4), (3,0), rowspan = 1, colspan = 4)
# bottom_plt.bar(dataset.Date, dataset['Volume'])
# plt.title('\nNvidia Trading Volume', y = -0.60)
plt.gcf().set_size_inches(16, 10)
# The neural-network results later on are good, possibly because Volume is kept here
dataset2 = dataset[["Close", "Volume", "Date"]]
dataset2["logclose"] = np.log(dataset2["Close"])
dataset2.head()
dataset2.isnull().sum()
# ## ARIMA
# 1. Difference to remove the trend; take the log to handle heteroscedasticity
# 2. Choose the orders from the ACF and PACF
# 3. Use auto ARIMA to choose the orders
# ### Auto ARIMA
# Select the orders automatically first, then interpret them together with the ACF, PACF and the AIC
# ARIMA(2,1,3)
import pmdarima as pm
smodel = pm.auto_arima(
dataset2.logclose,
start_p=1,
max_p=6,
start_q=1,
max_q=6,
    seasonal=True,  # with seasonal=True this becomes a SARIMA search
    stationary=False,
    test="adf",  # choose the differencing order automatically via the ADF test
information_criterion="aic",
stepwise=False,
error_action="ignore",
suppress_warnings=True,
)
smodel.summary()
smodel.plot_diagnostics()
plt.gcf().set_size_inches(16, 10)
plt.show()
# #### Differencing
# Take the log, then a first difference
# Need to check that the result is stationary and not pure white noise
np.min(dataset2.logclose)
plt.rcParams.update({"figure.figsize": (18, 7), "figure.dpi": 120})
# Original Series
fig, axes = plt.subplots(3, 2, sharex=True)
axes[0, 0].plot(dataset2["Close"].values)
axes[0, 0].set_title("Original Series")
# plot_acf(dataset2['Close'].values, ax = axes[0, 1]);axes[0, 1].set_title('ACF of Original Series')
axes[0, 1].plot(dataset2["logclose"].values)
axes[0, 1].set_title("Original Series(log)")
# plot_acf(dataset2['logclose'].values, ax = axes[0, 3]);axes[0, 3].set_title('ACF of Original Series(log)')
# 1st Differencing
axes[1, 0].plot(dataset2["Close"].diff())
axes[1, 0].set_title("1st Order Differencing")
# plot_acf(dataset2['Close'].diff().dropna(), ax = axes[1, 1]);axes[1, 1].set_title('ACF of 1st Order Differencing')
axes[1, 1].plot(dataset2["logclose"].diff())
axes[1, 1].set_title("1st Order Differencing(log)")
# plot_acf(dataset2['logclose'].diff(), ax = axes[1, 3]);axes[1, 3].set_title('ACF of 1st Order Differencing(log)')
# 1st difference followed by a seasonal (lag 8) difference
axes[2, 0].plot(dataset2["Close"].diff().diff(periods=8))
axes[2, 0].set_title("1st + Seasonal (lag 8) Differencing")
# plot_acf(dataset2['Close'].diff().diff().dropna(), ax = axes[2, 1]);axes[2, 1].set_title('ACF of 2nd Order Differencing')
axes[2, 1].plot(dataset2["logclose"].diff().diff(periods=8))
axes[2, 1].set_title("1st + Seasonal (lag 8) Differencing (log)")
# plot_acf(dataset2['logclose'].diff().diff(), ax = axes[2, 3]);axes[2, 3].set_title('ACF of 2nd Order Differencing(log)')
plt.show()
# #### Stationarity tests
# - ADF: null rejected, so the differenced series is stationary
# - Breusch-Pagan: null rejected, so heteroscedasticity remains; handling the outliers might fix this
result = adfuller(dataset2.logclose.diff()[1:], autolag="AIC")
print(f"ADF Statistic: {result[0]}")
print(f"p-value: {result[1]}")
print("Critical Values:")
for key, value in result[4].items():
    print(f"  {key}: {value}")
import statsmodels.stats.api as sms
test = sms.het_breuschpagan(
np.array(dataset2.logclose.diff()[1:]).reshape(-1, 1),
np.array(range(len(dataset2.logclose.diff()[1:]))).reshape(-1, 1),
)
test[-1]
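# `kpss` is imported above but never used; as a complementary check to the ADF test, here is a
# minimal sketch on the same log-differenced series (null hypothesis of KPSS: stationarity).
# Note: nlags="auto" assumes a reasonably recent statsmodels version.
kpss_stat, kpss_p, _, kpss_crit = kpss(dataset2.logclose.diff()[1:], regression="c", nlags="auto")
print(f"KPSS Statistic: {kpss_stat}, p-value: {kpss_p}")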
# #### White-noise (Ljung-Box) test
# Rejection at any lag is sufficient
from statsmodels.stats.diagnostic import acorr_ljungbox
ljungbox_result = acorr_ljungbox(
dataset2.logclose.diff().diff(periods=8)[9:], lags=12
)  # returns the test statistics and p-values; lags is the number of lags tested
ljungbox_result
# #### ACF,PACF
plt.rcParams.update({"figure.figsize": (20, 5), "figure.dpi": 120})
fig, axes = plt.subplots(1, 2, sharex=True)
plot_acf(dataset2.logclose.diff()[1:], ax=axes[0])
axes[0].set_title("ACF of Original Series(log)")
plot_pacf(dataset2.logclose.diff()[1:], ax=axes[1])
axes[1].set_title("PACF of 1st Order Differencing(log)")
plt.show()
# ## Models considered:
# **ARIMA(2,1,3)**
#
# **Additive decomposition**
#
# **LSTM**
# ## ARIMA
data = dataset2["logclose"].values
print("Length of Total data: ", len(data))
train_length = int(len(data) * 0.9)
train_data = data[:train_length]
test_data = data[train_length:]
print("Train and Test data length: ", len(train_data), len(test_data))
# ### Building ARIMA Model
import statsmodels.api as sm
model = sm.tsa.arima.ARIMA(train_data, order=(2, 1, 3))
model_fit = model.fit()
print(model_fit.summary())
# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1, 2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind="kde", title="Density", ax=ax[1])
plt.show()
# Forecast
forecast_result = model_fit.forecast(steps=50, alpha=0.05) # 95% conf
forecast_result[:50]
test_data[:50]
# ### Plotting Test and Predicted Results
# Forecast the next 30 days
pred_uc = model_fit.get_forecast(steps=30, alpha=0.05)
pred_pr = pred_uc.predicted_mean
# Get the confidence interval of the forecast
pred_ci = pred_uc.conf_int()
# Combine the forecasts with their confidence interval
pred_data = pd.DataFrame(
{"forecast": pred_pr, "lower_ci_95": pred_ci[:, 0], "upper_ci_95": pred_ci[:, 1]}
)
pred_data.head()
fig, ax = plt.subplots(figsize=(15, 6))
ax.plot(pred_data.index[:30] - 30, train_data[-30:])
ax.plot(pred_data.forecast[:30], color="green", label="forecast", alpha=0.7)
ax.plot(
pred_data.index[:30], test_data[:30], color="yellow", label="observed", alpha=0.7
)
ax.fill_between(
pred_data.index,
pred_data.lower_ci_95,
pred_data.upper_ci_95,
color="grey",
alpha=0.5,
label="95% confidence interval",
)
ax.set_title("ARIMA Model for Nvidia Price Forecasting")
ax.legend()
plt.show()
# Here we're plotting Test and Predicted data
plt.figure(figsize=(12, 6))
plt.rcParams.update({"font.size": 12})
plt.plot(test_data[:50], "#0077be", label="Actual")
plt.plot(forecast_result[:], "#ff8841", label="Predicted")
plt.title("ARIMA Model for Nvidia Price Forecasting")
plt.ylabel("Nvidia Price [in Dollar]")
plt.xlabel("Time Steps [in Days] ")
plt.legend()
plt.show()
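# The ARIMA model is fit on log prices; a minimal sketch (assumption: plain exponentiation,
# ignoring any log-normal bias correction) maps the forecasts back to the dollar scale.
forecast_price = np.exp(forecast_result[:50])
actual_price = np.exp(test_data[:50])
print("First 5 forecast vs actual prices:", forecast_price[:5], actual_price[:5])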
# ## Additive decomposition
# Three candidate periods: 7, 30 and 365. None of them shows a clear seasonal pattern
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=7,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=30,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(
dataset2.logclose,
# model='multiplicative',
period=365,
)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 12)
plt.show()
help(seasonal_decompose)
# ## Artificial Neural Network
data = dataset2["Close"].values
print("Shape of data: ", data.shape)
# Separating train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", train_data.shape, test_data.shape)
# Changing dataset shape to 2D
train_data = train_data.reshape(-1, 1)
test_data = test_data.reshape(-1, 1)
print("Shape of Train and Test data: ", train_data.shape, test_data.shape)
def create_dataset(dataset, lookback):
dataX, dataY = [], []
for i in range(len(dataset) - lookback - 1):
a = dataset[i : (i + lookback), 0]
dataX.append(a)
b = dataset[i + lookback, 0]
dataY.append(b)
return np.array(dataX), np.array(dataY)
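# Tiny illustration of what create_dataset returns (toy 2-D array, lookback of 2; not used below).
toy = np.arange(6, dtype=float).reshape(-1, 1)
toy_X, toy_y = create_dataset(toy, 2)
print(toy_X)  # [[0. 1.] [1. 2.] [2. 3.]]
print(toy_y)  # [2. 3. 4.]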
# ### Selecting Lag value from PACF graph
plot_pacf(data, lags=10)
plt.show()
# ### Keeping only lags whose partial autocorrelation exceeds 0.1
pacf_value = pacf(data, nlags=20)
lag = 0
# collect lag values greater than 10% correlation
for x in pacf_value:
if x > 0.1:
lag += 1
else:
break
print("Selected look_back (or lag = ): ", lag)
# ### Separating Input and Output values
train_X, train_y = create_dataset(train_data, lag)
test_X, test_y = create_dataset(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building an MLP model
np.random.seed(7)
# model = Sequential()
# model.add(Dense(64, input_dim = lag, activation='relu', name= "1st_hidden"))
# # model.add(Dense(64, activation='relu', name = '2nd_hidden'))
# model.add(Dense(1, name = 'Output_layer', activation = 'linear'))
# # model.add(Activation("linear", name = 'Linear_activation'))
# model.compile(loss = "mean_squared_error", optimizer = "adam")
# model.summary()
# ### Fitting data to Model
# epoch_number = 100
# batches = 64
# history = model.fit(train_X, train_y, epochs = epoch_number, batch_size = batches, verbose = 1, shuffle = False,
# validation_split = 0.1)
# ### Train and Validation Loss
# # plot history
# plt.clf
# plt.figure(figsize = (10,8))
# plt.plot(history.history['loss'], label = 'train')
# plt.plot(history.history['val_loss'], label = 'test')
# plt.xlabel('Number of Epochs')
# plt.ylabel('Train and Test Loss')
# plt.title('Train and Test loss per epochs [Univariate]')
# plt.legend()
# plt.show()
# ### Making Predictions
# # Make prediction
# testPredict = model.predict(test_X)
# predicted_value = testPredict[:, 0]
# ### Evaluation Metrics for measuring performance
# * **R-Squared**
# * **Mean Absolute Error**
# * **Mean Absolute Percentage Error**
# * **Mean Squared Error**
# * **Root Mean Squared Error**
# * **Normalized Root Mean Squared Error**
# * **Weighted Absolute Percentage Error**
# * **Weighted Mean Absolute Percentage Error**
def evaluate_forecast_results(actual, predicted):
print("R2 Score: ", round(r2_score(actual, predicted), 2))
print("MAE : ", round(mae(actual, predicted), 2))
print("MSE: ", round(mean_squared_error(actual, predicted), 2))
print("RMSE: ", round(math.sqrt(mean_squared_error(actual, predicted)), 2))
print("NRMSE: ", NRMSE(actual, predicted))
print("WMAPE: ", WMAPE(actual, predicted))
def NRMSE(actual, predicted):
rmse = math.sqrt(mean_squared_error(actual, predicted))
nrmse = rmse / np.mean(actual)
return round(nrmse, 4)
def WMAPE(actual, predicted):
    abs_error = np.sum(np.abs(actual - predicted))
    wmape = abs_error / np.sum(actual)
    return round(wmape, 4)
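# MAPE and WAPE appear in the metric list above but are not implemented; minimal sketches
# (assumption: for reference only, they are not called by evaluate_forecast_results).
def MAPE(actual, predicted):
    return round(np.mean(np.abs((actual - predicted) / actual)) * 100, 4)


def WAPE(actual, predicted):
    return round(np.sum(np.abs(actual - predicted)) / np.sum(np.abs(actual)), 4)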
# evaluate_forecast_results(test_y, predicted_value)  # requires predicted_value from the commented-out MLP block above
# plt.figure(figsize = (16, 8))
# plt.rcParams.update({'font.size': 12})
# plt.plot(test_y[:], '#0077be', label = 'Actual')
# plt.plot(predicted_value, '#ff8841', label = 'Predicted')
# plt.title('MLP Model for Nvidia Price Forecasting')
# plt.ylabel('Nvidia Stock Close Price ')
# plt.xlabel('Time Steps [in Days] ')
# plt.legend()
# plt.show()
# ## RNN - Univariate Time Series Forecasting
#
# data = dataset2['Close'].values
# print('Shape of data: ', data.shape)
# Separate train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", len(train_data), len(test_data))
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
end_ix = i + n_steps
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# ### Lag value chosen from the PACF plot, as above
pacf_value = pacf(data, nlags=20)
lag = 0
# collect lag values greater than 10% correlation
for x in pacf_value:
if x > 0.1:
lag += 1
else:
break
print("Selected look_back (or lag = ): ", lag)
n_features = 1
train_X, train_y = split_sequence(train_data, lag)
test_X, test_y = split_sequence(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Reshaping train_X and test_X to 3-D
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features))
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features))
# New shape of train_X and test_X are :-
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building the model
# # define model
# model = Sequential()
# model.add(SimpleRNN(64, activation='relu', return_sequences = False, input_shape = (lag, n_features)))
# model.add(Dense(1))
# model.compile(optimizer = 'adam', loss = 'mse')
# model.summary()
# ### Fit the model - with training data
import tensorflow as tf
tf.config.run_functions_eagerly(True)
# fit model
# cb = tf.keras.callbacks.EarlyStopping(monitor = 'loss', patience = 15, restore_best_weights = True)
# history = model.fit(train_X, train_y, epochs = 150, batch_size = 64, verbose = 1, validation_split = 0.1,
# callbacks = [cb])
# ### Summarizing model accuracy and Loss
# # summarize history for loss
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc = 'upper left')
# plt.show()
# ### Making prediction with Test data
# train_predict = model.predict(train_X)
# test_predict = model.predict(test_X)
# print('Shape of train and test predict: ', train_predict.shape, test_predict.shape)
# ### Model evaluation
# actual_ = test_y
# predicted_ = test_predict[:, 0]
# len(actual_), len(predicted_)
# evaluate_forecast_results(actual_, predicted_)
# ### Plotting test and predicted data
#
# plt.rc("figure", figsize = (14,8))
# plt.rcParams.update({'font.size': 16})
# plt.plot(actual_, label = 'Actual')
# plt.plot(predicted_, label = 'Predicted')
# plt.xlabel('Time in days')
# plt.ylabel('Nvidia stock price')
# plt.title('Nvidia Stock Close price prediction using Simple RNN - Test data')
# plt.legend()
# plt.show()
# df_train = pd.DataFrame(columns = ['Train data'])
# df_train['Train data'] = train_data
# df = pd.DataFrame(columns = ['Test data', 'Predicted data'])
# df['Test data'] = actual_
# df['Predicted data'] = predicted_
# total_len = len(df_train['Train data']) + len(df['Test data'])
# range(len(df_train['Train data']), total_len)
# x_list = [x for x in range(len(df_train['Train data']), total_len)]
# df.index = x_list
# plt.rc("figure", figsize=(14,8))
# plt.rcParams.update({'font.size': 16})
# plt.xlabel('Time in days')
# plt.ylabel('Nvidia price')
# plt.title('Nvidia price prediction using Simple RNN')
# plt.plot(df_train['Train data'])
# plt.plot(df[['Test data', 'Predicted data']])
# plt.legend(['Train', 'Test', 'Predictions'], loc='lower right')
# plt.show()
# ## LSTM
data = dataset2["Close"].values
print("Shape of data: ", data.shape)
# Separate train and test data
train_length = int(len(data) * 0.8)
print("Train length: ", train_length)
train_data, test_data = data[:train_length], data[train_length:]
print("Shape of Train and Test data: ", len(train_data), len(test_data))
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
end_ix = i + n_steps
if end_ix > len(sequence) - 1:
break
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# ### Choosing the appropriate lag value
lag = 2
n_features = 1
train_X, train_y = split_sequence(train_data, lag)
test_X, test_y = split_sequence(test_data, lag)
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Reshaping train_X and test_X to 3D
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features))
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features))
print("Shape of train_X and train_y: ", train_X.shape, train_y.shape)
print("Shape of test_X and test_y: ", test_X.shape, test_y.shape)
# ### Building LSTM Model
model = Sequential()
model.add(
LSTM(64, activation="relu", return_sequences=True, input_shape=(lag, n_features))
)
model.add(LSTM(64, activation="relu"))
model.add(Dense(1))
model.compile(optimizer="adam", loss="mse")
model.summary()
# ### Fitting model with data
import tensorflow as tf
tf.config.run_functions_eagerly(True)
cb = tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=15, restore_best_weights=True
)
history = model.fit(
train_X,
train_y,
epochs=150,
batch_size=64,
verbose=1,
validation_split=0.1,
callbacks=[cb],
)
# ### Summarizing model accuracy and Loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# ### Making the prediction
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print("Shape of train and test predict: ", train_predict.shape, test_predict.shape)
# ### Model Evaluation
actual_lstm = test_y
predicted_lstm = test_predict[:, 0]
evaluate_forecast_results(actual_lstm, predicted_lstm)
df_train = pd.DataFrame(columns=["Train data"])
df_train["Train data"] = train_data
df = pd.DataFrame(columns=["Test data", "Predicted data"])
df["Test data"] = actual_lstm
df["Predicted data"] = predicted_lstm
total_len = len(df_train["Train data"]) + len(df["Test data"])
range(len(df_train["Train data"]), total_len)
x_list = [x for x in range(len(df_train["Train data"]), total_len)]
df.index = x_list
plt.rc("figure", figsize=(14, 8))
plt.rcParams.update({"font.size": 16})
plt.xlabel("Time in days")
plt.ylabel("Nvidia Stock Close price")
plt.title("Nvidia Stock price prediction using LSTM")
plt.plot(df_train["Train data"])
plt.plot(df[["Test data", "Predicted data"]])
plt.legend(["Train", "Test", "Predictions"], loc="lower right")
plt.show()
| false | 1 | 6,952 | 1 | 7,632 | 6,952 |
||
129524299
|
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score, log_loss
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier  # used in the Random Forest section below
from sklearn.model_selection import cross_val_score
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
train.head(5)
train.columns
expected_results = train[["Id", "Class"]].copy()
expected_results.head(5)
plt.figure(figsize=(3, 3))
plot_train = train.Class.value_counts()
plot_train.plot(kind="bar", color=["red", "orange"])
Health_indicators = [
"AB",
"AF",
"AH",
"AM",
"AR",
"AX",
"AY",
"AZ",
"BC",
"BD ",
"BN",
"BP",
"BQ",
"BR",
"BZ",
"CB",
"CC",
"CD ",
"CF",
"CH",
"CL",
"CR",
"CS",
"CU",
"CW ",
"DA",
"DE",
"EJ",
"DF",
"DH",
"DI",
"DL",
"DN",
"DU",
"DV",
"DY",
"EB",
"EE",
"EG",
"EH",
"EL",
"EP",
"EU",
"FC",
"FD ",
"FE",
"FI",
"FL",
"FR",
"FS",
"GB",
"GE",
"GF",
"GH",
"GI",
"GL",
]
sns.set(font_scale=2)
fig, axes = plt.subplots(10, 6, figsize=(40, 60))
for idx, type_col in enumerate(Health_indicators):
sns.barplot(
x="Class",
y=type_col,
data=train,
ax=axes[idx // 6, idx % 6],
hue="Class",
palette={0: "red", 1: "orange"},
)
plt.tight_layout()
train_df = train.drop(["EJ", "Id"], axis=1)
test_df = test.drop(["EJ"], axis=1)
train_df.isnull().sum()
train_df.head()
sns.set(font_scale=0.5)
# Note: np.log yields -inf for zero values; np.log1p would be a safer choice here
log_data = np.log(train_df)
# Plot a histogram of each column
log_data.hist()
def balanced_log_loss(y_true, y_pred):
"""
Computes the balanced logarithmic loss between y_true and y_pred.
Parameters:
- y_true: array-like, shape (n_samples,)
True binary labels.
- y_pred: array-like, shape (n_samples,)
Predicted probabilities for class 1.
Returns:
- loss: float
The balanced logarithmic loss.
"""
# Compute the class weights
class_weight = len(y_true) / (2 * np.bincount(y_true))
# Compute the loss for each observation
loss = np.zeros_like(y_true, dtype=float)
loss[y_true == 0] = -np.log(1 - y_pred[y_true == 0])
loss[y_true == 1] = -np.log(y_pred[y_true == 1])
# Weight the loss for each observation
loss_weighted = np.zeros_like(y_true, dtype=float)
loss_weighted[y_true == 0] = loss[y_true == 0] * class_weight[0]
loss_weighted[y_true == 1] = loss[y_true == 1] * class_weight[1]
# Compute the average loss
return np.mean(loss_weighted)
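# Quick sanity check with hand-made labels and probabilities (illustration only).
toy_true = np.array([0, 0, 1, 1])
toy_pred = np.array([0.1, 0.2, 0.8, 0.9])
print("Toy balanced log loss:", balanced_log_loss(toy_true, toy_pred))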
# ### Logistic Regression
# Split the data into training and validation sets
X = train_df.drop(["Class"], axis=1)
y = train_df["Class"]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# Impute missing values
imputer = SimpleImputer(strategy="mean")
imputer.fit(X_train)
X_train_imputed = imputer.transform(X_train)
X_val_imputed = imputer.transform(X_val)
# Train the logistic regression model on the imputed data
model = LogisticRegression()
model.fit(X_train_imputed, y_train)
# Predict on the validation set and calculate the balanced logarithmic loss
y_val_pred = model.predict_proba(X_val_imputed)[:, 1]
log_loss = balanced_log_loss(y_val, y_val_pred)
print("Balanced logarithmic loss on validation set:", log_loss)
# ### Decision Tree
from sklearn.tree import DecisionTreeClassifier
# Train the decision tree model on the imputed data
model = DecisionTreeClassifier(random_state=42)
model.fit(X_train_imputed, y_train)
# Predict on the validation set and calculate the balanced logarithmic loss
y_val_pred = model.predict_proba(X_val_imputed)[:, 1]
log_loss = balanced_log_loss(y_val, y_val_pred)
print("Balanced logarithmic loss on validation set:", log_loss)
# ### Random Forest
# Train a random forest classifier on the imputed data
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train_imputed, y_train)
# Predict on the validation set and calculate the balanced logarithmic loss
y_val_pred = model.predict_proba(X_val_imputed)[:, 1]
log_loss = balanced_log_loss(y_val, y_val_pred)
print("Balanced logarithmic loss on validation set:", log_loss)
# ### SVC Model
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import balanced_accuracy_score, make_scorer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
# Impute missing values
imputer = SimpleImputer(strategy="mean")
imputer.fit(X_train)
X_train_imputed = imputer.transform(X_train)
X_val_imputed = imputer.transform(X_val)
# Scale the data
scaler = StandardScaler()
scaler.fit(X_train_imputed)
X_train_scaled = scaler.transform(X_train_imputed)
X_val_scaled = scaler.transform(X_val_imputed)
# Train the SVM model on the scaled data
model = make_pipeline(SVC(probability=True, random_state=42))
model.fit(X_train_scaled, y_train)
# Predict on the validation set and calculate the balanced logarithmic loss
y_val_pred = model.predict_proba(X_val_scaled)[:, 1]
log_loss = balanced_log_loss(y_val, y_val_pred)
print("Balanced logarithmic loss on validation set:", log_loss)
# ### XGBClassifier Model
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(X_train_imputed, y_train)
# Predict on the validation set and calculate the balanced logarithmic loss
y_val_pred = model.predict_proba(X_val_imputed)[:, 1]
log_loss = balanced_log_loss(y_val, y_val_pred)
print("Balanced logarithmic loss on validation set:", log_loss)
# Balanced logarithmic loss on the validation set:
# * LR Model = 0.836177246086312
# * DT Model = inf
# * RFC Model = 0.4081586115192574
# * SVM Model = 0.5020848394635566
# * XGB Model = 0.2550011273428488
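# Optional: a quick 5-fold stratified CV check of the XGBoost model on the imputed training data.
# Sketch only — it uses sklearn's plain (unbalanced) log loss, not the competition's balanced variant.
from sklearn.model_selection import StratifiedKFold

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
cv_scores = cross_val_score(
    XGBClassifier(), imputer.transform(X), y, cv=cv, scoring="neg_log_loss"
)
print("Mean CV log loss:", -cv_scores.mean())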
# Make predictions on the test set using the trained model
test_imputed = imputer.transform(test_df.drop("Id", axis=1))
test_preds = model.predict_proba(test_imputed)[:, 1]
# Create the submission dataframe
submission_df = pd.DataFrame(
{"Id": test_df["Id"], "0": 1 - test_preds, "1": test_preds}
)
# Save the submission dataframe to a CSV file
submission_df.to_csv("submission.csv", index=False)
submission_df.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524299.ipynb
| null | null |
[{"Id": 129524299, "ScriptId": 38497159, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13119587, "CreationDate": "05/14/2023 14:22:41", "VersionNumber": 2.0, "Title": "notebook63eeba59cd", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 200.0, "LinesInsertedFromPrevious": 170.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 30.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,250 | 0 | 2,250 | 2,250 |
||
129524710
|
# ## Imports
import os
import gc
import glob
import json
import multiprocessing as mp
import warnings
import albumentations as A
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import PIL.Image as Image
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as thd
import segmentation_models_pytorch as smp
from collections import defaultdict
from types import SimpleNamespace
from typing import Dict, List, Optional, Tuple
from pathlib import Path
from sklearn.metrics import fbeta_score
from sklearn.exceptions import UndefinedMetricWarning
from albumentations.pytorch import ToTensorV2
from segmentation_models_pytorch.encoders import get_preprocessing_fn
from tqdm import tqdm
warnings.simplefilter("ignore")
# ## Config
class CFG:
# ============== comp exp name =============
comp_name = "vesuvius"
comp_dir_path = "/kaggle/input"
comp_folder_name = "vesuvius-challenge-ink-detection"
comp_dataset_path = os.path.join(comp_dir_path, comp_folder_name)
exp_name = "vesuvius_2d_slide_unet_exp001"
# ============== pred target =============
target_size = 1
# ============== model cfg =============
model_name = "Unet"
backbone = "efficientnet-b0"
# backbone = 'se_resnext50_32x4d'
in_chans = 10 # 65
# ============== data preprocessing =============
preprocess_input = get_preprocessing_fn(backbone, pretrained="imagenet")
# ============== training cfg =============
size = 224
tile_size = 224
stride = tile_size // 2
train_batch_size = 32 # 32
valid_batch_size = train_batch_size
use_amp = True
scheduler = "GradualWarmupSchedulerV2"
# scheduler = 'CosineAnnealingLR'
epochs = 15 # 30
    # AdamW with warmup
warmup_factor = 10
# lr = 1e-3 / warmup_factor
lr = 1e-3
# ============== fold =============
valid_id = 1
# objective_cv = 'binary' # 'binary', 'multiclass', 'regression'
metric_direction = "maximize" # maximize, 'minimize'
# metrics = 'dice_coef'
# ============== fixed =============
pretrained = True
inf_weight = "best" # 'best'
min_lr = 1e-6
weight_decay = 1e-6
max_grad_norm = 1000
print_freq = 50
num_workers = 4
seed = 42
# ============== set dataset path =============
outputs_path = f"/kaggle/working/outputs/{comp_name}/{exp_name}/"
submission_dir = outputs_path + "submissions/"
submission_path = submission_dir + f"submission_{exp_name}.csv"
model_dir = outputs_path + f"{comp_name}-models/"
figures_dir = outputs_path + "figures/"
log_dir = outputs_path + "logs/"
log_path = log_dir + f"{exp_name}.txt"
# ============== augmentation =============
train_aug_list = [
A.Resize(size, size),
A.RandomBrightnessContrast(p=0.75),
A.OneOf(
[
A.GaussNoise(var_limit=[10, 50]),
A.GaussianBlur(),
A.MotionBlur(),
],
p=0.4,
),
A.Normalize(mean=[0] * in_chans, std=[1] * in_chans),
ToTensorV2(transpose_mask=True),
]
valid_aug_list = [
A.Resize(size, size),
A.Normalize(mean=[0] * in_chans, std=[1] * in_chans),
ToTensorV2(transpose_mask=True),
]
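# The augmentation lists above are defined but never composed in this notebook; a minimal sketch
# of how they could be wrapped (assumption: the result would be passed as `transform` to the dataset).
train_transform = A.Compose(CFG.train_aug_list)
valid_transform = A.Compose(CFG.valid_aug_list)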
# ## Set up data
class SubvolumeDataset(thd.Dataset):
def __init__(self, fragments: List[Path], transform=None):
self.fragments = sorted(map(lambda path: path.resolve(), fragments))
self.transform = transform
# Load sequentially
image_stacks = []
labels = []
for fragment_id, fragment_path in enumerate(self.fragments):
fragment_path = fragment_path.resolve() # absolute path
print(fragment_path)
images, label = self.read_image_mask(fragment_path)
image_stack = np.stack(images, axis=0)
image_stacks.append(image_stack)
labels.append(label)
print(f"Loaded fragment {fragment_path} on {os.getpid()}")
self.labels = labels
self.image_stacks = image_stacks
    def slice_fragment_to_subvolumes(self, images, mask):
        # Cut the padded fragment into overlapping tile_size x tile_size patches
        sliced_images = []
        sliced_ink_masks = []
        x1_list = list(range(0, images.shape[2] - CFG.tile_size + 1, CFG.stride))
        y1_list = list(range(0, images.shape[1] - CFG.tile_size + 1, CFG.stride))
        for y1 in y1_list:
            for x1 in x1_list:
                y2 = y1 + CFG.tile_size
                x2 = x1 + CFG.tile_size
                sliced_images.append(images[:, y1:y2, x1:x2])
                sliced_ink_masks.append(mask[y1:y2, x1:x2])
        return sliced_images, sliced_ink_masks
def read_image_mask(self, fragment_path):
surface_volume_paths = sorted((fragment_path / "surface_volume").rglob("*.tif"))
z_dim = CFG.in_chans
z_mid = len(surface_volume_paths) // 2
z_start, z_end = z_mid - z_dim // 2, z_mid + z_dim // 2
# we don't convert to torch since it doesn't support uint16
images = [
np.array(Image.open(fn)) for fn in surface_volume_paths[z_start:z_end]
]
        # pad so that height/width become exact multiples of tile_size (pad can be 0)
        pad0 = (CFG.tile_size - images[0].shape[0] % CFG.tile_size) % CFG.tile_size
        pad1 = (CFG.tile_size - images[0].shape[1] % CFG.tile_size) % CFG.tile_size
        images = np.pad(images, ((0, 0), (0, pad0), (0, pad1)), mode="constant")
        ink_mask = np.array(
            Image.open(str(fragment_path / "inklabels.png")).convert("1")
        )
        ink_mask = np.pad(ink_mask, [(0, pad0), (0, pad1)], constant_values=0)
        # a "1"-mode image already yields 0/1 values, so no division by 255 is needed
        ink_mask = ink_mask.astype("float32")
return images, ink_mask
def __len__(self):
return len(self.image_stacks)
    def __getitem__(self, index):
        image_stack = self.image_stacks[index]
        label = self.labels[index]
        # apply the (optional) transform before returning
        if self.transform:
            data = self.transform(image=image_stack, mask=label)
            image_stack = data["image"]
            label = data["mask"]
        return image_stack, label
    # NOTE: leftover from an earlier per-pixel dataset; it relies on self.pixels and
    # self.voxel_shape, which SubvolumeDataset above does not define.
    def plot_label(self, index, **kwargs):
pixel = self.pixels[index]
label = self.labels[pixel[-1]]
print("Index:", index)
print("Pixel:", pixel)
print("Label:", int(label[pixel[0], pixel[1]]))
if isinstance(label, torch.Tensor):
label = label.numpy()
fig, ax = plt.subplots(**kwargs)
ax.imshow(label, cmap="gray")
y, x, _ = pixel
_, y_dim, x_dim = self.voxel_shape
x_min = x - (x_dim // 2)
x_max = x + (x_dim // 2)
y_min = y - (y_dim // 2)
y_max = y + (y_dim // 2)
rect = plt.Rectangle(
(x_min, y_min), x_dim, y_dim, linewidth=2, edgecolor="y", facecolor="none"
)
ax.add_patch(rect)
plt.show()
base_path = Path("/kaggle/input/vesuvius-challenge/")
train_path = base_path / "train"
all_fragments = sorted([f.name for f in train_path.iterdir()])
print("All fragments:", all_fragments)
train_fragments = [train_path / fragment_name for fragment_name in all_fragments]
train_fragments
train_dset = SubvolumeDataset(
fragments=train_fragments,
)
# transform=cfg.preprocess_input
print("Num items (pixels)", len(train_dset))
# #### Sanity check
index = 1
print(f"Sub Volume image shape = {train_dset[index][0].shape}")
# train_dset.plot_label(index, figsize=(16, 10))
train_loader = thd.DataLoader(train_dset, batch_size=CFG.train_batch_size, shuffle=True)
print("Num batches:", len(train_loader))
# ### Set up model
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class InkDetector(torch.nn.Module):
def __init__(self, cfg, weight=None):
super().__init__()
self.cfg = cfg
self.model = smp.Unet(
encoder_name=cfg.backbone,
encoder_weights=weight,
in_channels=cfg.in_chans,
classes=cfg.target_size,
activation=None,
)
def forward(self, image):
output = self.model(image)
return output
model = InkDetector(CFG, "imagenet").to(DEVICE)
# ### Train
TRAINING_STEPS = 1000
LEARNING_RATE = CFG.lr  # the config class above is named CFG
TRAIN_RUN = True  # To avoid re-running when saving the notebook
if TRAIN_RUN:
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=LEARNING_RATE, total_steps=TRAINING_STEPS
)
model.train()
running_loss = 0.0
running_accuracy = 0.0
running_fbeta = 0.0
denom = 0
pbar = tqdm(enumerate(train_loader), total=TRAINING_STEPS)
for i, (subvolumes, inklabels) in pbar:
if i >= TRAINING_STEPS:
break
optimizer.zero_grad()
outputs = model(subvolumes.to(DEVICE))
print(f"outpus shape = {outputs.shape}")
loss = criterion(outputs, inklabels.to(DEVICE))
loss.backward()
optimizer.step()
scheduler.step()
pred_ink = outputs.detach().sigmoid().gt(0.4).cpu().int()
        accuracy = (pred_ink == inklabels).sum().float().div(inklabels.numel())
running_fbeta += fbeta_score(
inklabels.view(-1).numpy(), pred_ink.view(-1).numpy(), beta=0.5
)
running_accuracy += accuracy.item()
running_loss += loss.item()
denom += 1
pbar.set_postfix(
{
"Loss": running_loss / denom,
"Accuracy": running_accuracy / denom,
"[email protected]": running_fbeta / denom,
}
)
if (i + 1) % 500 == 0:
running_loss = 0.0
running_accuracy = 0.0
running_fbeta = 0.0
denom = 0
torch.save(model.state_dict(), "/kaggle/working/model.pt")
else:
model_weights = torch.load("/kaggle/working/model.pt")
model.load_state_dict(model_weights)
# ### Evaluate
# Clear memory before loading test fragments
train_dset.labels = None
train_dset.image_stacks = []
del train_loader, train_dset
gc.collect()
test_path = base_path / "test"
test_fragments = sorted(test_path.iterdir())
print("All fragments:", test_fragments)
pred_images = []
model.eval()
for test_fragment in test_fragments:
outputs = []
    eval_dset = SubvolumeDataset(fragments=[test_fragment])
    # note: read_image_mask expects an inklabels.png, which test fragments do not provide
    eval_loader = thd.DataLoader(eval_dset, batch_size=CFG.valid_batch_size, shuffle=False)
with torch.no_grad():
for i, (subvolumes, _) in enumerate(tqdm(eval_loader)):
output = model(subvolumes.to(DEVICE)).view(-1).sigmoid().cpu().numpy()
outputs.append(output)
# we only load 1 fragment at a time
image_shape = eval_dset.image_stacks[0].shape[1:]
eval_dset.labels = None
eval_dset.image_stacks = None
del eval_loader
gc.collect()
pred_image = np.zeros(image_shape, dtype=np.uint8)
outputs = np.concatenate(outputs)
    # NOTE: `pixels` comes from an earlier per-pixel dataset version and is not populated by SubvolumeDataset
    for (y, x, _), prob in zip(eval_dset.pixels[: outputs.shape[0]], outputs):
pred_image[y, x] = prob > 0.4
pred_images.append(pred_image)
eval_dset.pixels = None
del eval_dset
gc.collect()
print("Finished", test_fragment)
plt.imshow(pred_images[1], cmap="gray")
# ### Submission
def rle(output):
flat_img = np.where(output > 0.4, 1, 0).astype(np.uint8)
starts = np.array((flat_img[:-1] == 0) & (flat_img[1:] == 1))
ends = np.array((flat_img[:-1] == 1) & (flat_img[1:] == 0))
starts_ix = np.where(starts)[0] + 2
ends_ix = np.where(ends)[0] + 2
lengths = ends_ix - starts_ix
return " ".join(map(str, sum(zip(starts_ix, lengths), ())))
submission = defaultdict(list)
for fragment_id, fragment_name in enumerate(test_fragments):
submission["Id"].append(fragment_name.name)
submission["Predicted"].append(rle(pred_images[fragment_id]))
pd.DataFrame.from_dict(submission).to_csv("/kaggle/working/submission.csv", index=False)
pd.DataFrame.from_dict(submission)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524710.ipynb
| null | null |
[{"Id": 129524710, "ScriptId": 38514101, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11219911, "CreationDate": "05/14/2023 14:26:00", "VersionNumber": 1.0, "Title": "UNet Segmentataion [training]", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 416.0, "LinesInsertedFromPrevious": 416.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,613 | 0 | 3,613 | 3,613 |
||
129553692
|
<jupyter_start><jupyter_text>ISMI_Group3_PANDA_36_256_256_res1_tiles
This dataset is a preprocessed provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles, of 256 x 256 pixels. The tiles are taken from the medium resolution.
Kaggle dataset identifier: ismi-group3-panda-36-256-256-res1-tiles
<jupyter_script>import numpy as np
import torch
import pytorch_lightning as pl
from torch import nn
import os
import pandas as pd
import matplotlib.pyplot as plt
import torchvision
import torch.nn.functional as F
from torchsummary import summary
class EfficientNetModule(pl.LightningModule):
def __init__(self, num_classes=6):
super().__init__()
self.model = torch.hub.load(
"NVIDIA/DeepLearningExamples:torchhub",
"nvidia_efficientnet_b0",
pretrained=True,
)
# Replace last layer with a 6 class output layer
self.model.classifier.fc = torch.nn.Linear(
in_features=self.model.classifier.fc.in_features,
out_features=num_classes,
bias=True,
)
self.train_losses = []
self.val_losses = []
self.epoch_train_losses = []
self.epoch_val_losses = []
def forward(self, x):
x = self.model(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
x = x.float()
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, y)
self.epoch_train_losses.append(loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
x = x.float()
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, y)
self.epoch_val_losses.append(loss)
return loss
    def on_epoch_end(self):
        print(f"Train loss: {self.average(self.epoch_train_losses)}")
        print(f"Validation loss: {self.average(self.epoch_val_losses)}")
        self.train_losses.append(self.average(self.epoch_train_losses))
        self.val_losses.append(self.average(self.epoch_val_losses))
        self.epoch_train_losses = []
        self.epoch_val_losses = []
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.classifier.fc.parameters())
return optimizer
def average(self, lst):
return sum(lst) / len(lst)
class PANDATilesDataModule(pl.LightningModule):
def __init__(self, batch_size: int = 16, stage: str = "train"):
super().__init__()
self.batch_size = batch_size
self.data_dir = (
"/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiles/tiles"
)
# self.transform = transforms.Compose([transforms.ToTensor()])
train_csv = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/train.csv"
)
if stage == "train":
self.data = train_csv[:9000]
elif stage == "val":
self.data = train_csv[9000:]
elif stage == "test":
self.data = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/test.csv"
)
else:
print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test")
self.data = train_csv
# Returns the image and ISUP label
def __getitem__(self, idx):
item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image
item_dir = os.path.join(self.data_dir, item_name + "/tile_0.png")
return (
torchvision.io.read_image(item_dir),
self.data.iloc[idx].loc["isup_grade"],
)
def __len__(self):
return len(self.data)
class PANDADataModule(pl.LightningModule):
def __init__(self, batch_size: int = 16, stage: str = "train"):
super().__init__()
self.batch_size = batch_size
self.data_dir = "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiled_images/tiled_images"
# self.transform = transforms.Compose([transforms.ToTensor()])
train_csv = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/train.csv"
)
if stage == "train":
self.data = train_csv[:9000]
elif stage == "val":
self.data = train_csv[9000:]
elif stage == "test":
self.data = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/test.csv"
)
else:
print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test")
self.data = train_csv
# Returns the image and ISUP label
def __getitem__(self, idx):
item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image
item_dir = os.path.join(self.data_dir, item_name + "_tiled.png")
return (
torchvision.io.read_image(item_dir),
self.data.iloc[idx].loc["isup_grade"],
)
def __len__(self):
return len(self.data)
batch_size = 16
n_epochs = 2
train_data = PANDATilesDataModule("train")
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=batch_size, shuffle=True
)
val_data = PANDATilesDataModule("val")
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False)
EfficientNet = EfficientNetModule()
train_data.__getitem__(0)[0].shape
summary(EfficientNet, (3, 256, 256))
# trainer = pl.Trainer(limit_train_batches=100, max_epochs=n_epochs, accelerator='gpu')
# trainer.fit(model=EfficientNet, train_dataloaders=train_loader, val_dataloaders=val_loader)
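# The trainer call above is left commented out; a minimal sketch of how a short run could look
# (assumptions: 100 batches per epoch and GPU only if one is actually available).
trainer = pl.Trainer(
    limit_train_batches=100,
    max_epochs=n_epochs,
    accelerator="gpu" if torch.cuda.is_available() else "cpu",
)
trainer.fit(model=EfficientNet, train_dataloaders=train_loader, val_dataloaders=val_loader)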
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553692.ipynb
|
ismi-group3-panda-36-256-256-res1-tiles
|
florisvanwettum
|
[{"Id": 129553692, "ScriptId": 38516756, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2476129, "CreationDate": "05/14/2023 19:19:45", "VersionNumber": 1.0, "Title": "Group3_ISMI_Baseline_Floris", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185731578, "KernelVersionId": 129553692, "SourceDatasetVersionId": 5682352}]
|
[{"Id": 5682352, "DatasetId": 3228105, "DatasourceVersionId": 5757916, "CreatorUserId": 2476129, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:36:05", "VersionNumber": 5.0, "Title": "ISMI_Group3_PANDA_36_256_256_res1_tiles", "Slug": "ismi-group3-panda-36-256-256-res1-tiles", "Subtitle": "Medium resolution 36 256x256 tiles per sample, individual and combined images.", "Description": "This dataset is a preprocessed provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles, of 256 x 256 pixels. The tiles are taken from the medium resolution.", "VersionNotes": "Added the last sample of the train.csv to the tiled_images", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3228105, "CreatorUserId": 2476129, "OwnerUserId": 2476129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682352.0, "CurrentDatasourceVersionId": 5757916.0, "ForumId": 3293216, "Type": 2, "CreationDate": "05/05/2023 21:28:46", "LastActivityDate": "05/05/2023", "TotalViews": 99, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 4}]
|
[{"Id": 2476129, "UserName": "florisvanwettum", "DisplayName": "Florijs", "RegisterDate": "11/10/2018", "PerformanceTier": 0}]
|
import numpy as np
import torch
import pytorch_lightning as pl
from torch import nn
import os
import pandas as pd
import matplotlib.pyplot as plt
import torchvision
import torch.nn.functional as F
from torchsummary import summary
class EfficientNetModule(pl.LightningModule):
def __init__(self, num_classes=6):
super().__init__()
self.model = torch.hub.load(
"NVIDIA/DeepLearningExamples:torchhub",
"nvidia_efficientnet_b0",
pretrained=True,
)
# Replace last layer with a 6 class output layer
self.model.classifier.fc = torch.nn.Linear(
in_features=self.model.classifier.fc.in_features,
out_features=num_classes,
bias=True,
)
self.train_losses = []
self.val_losses = []
self.epoch_train_losses = []
self.epoch_val_losses = []
def forward(self, x):
x = self.model(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
x = x.float()
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, y)
self.epoch_train_losses.append(loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
x = x.float()
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, y)
self.epoch_val_losses.append(loss)
return loss
    def on_epoch_end(self):
        # `average` is a method of this class, so it must be called via self
        print(f"Train loss: {self.average(self.epoch_train_losses)}")
        print(f"Validation loss: {self.average(self.epoch_val_losses)}")
        self.train_losses.append(self.average(self.epoch_train_losses))
        self.val_losses.append(self.average(self.epoch_val_losses))
        self.epoch_train_losses = []
        self.epoch_val_losses = []
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.classifier.fc.parameters())
return optimizer
def average(self, lst):
return sum(lst) / len(lst)
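    # Note (not in the original notebook): pytorch_lightning 2.x no longer calls `on_epoch_end`.
    # A hedged sketch of the per-loop hooks that replace it, reusing the same bookkeeping:
    def on_train_epoch_end(self):
        if self.epoch_train_losses:
            print(f"Train loss: {self.average(self.epoch_train_losses)}")
            self.train_losses.append(self.average(self.epoch_train_losses))
            self.epoch_train_losses = []
    def on_validation_epoch_end(self):
        if self.epoch_val_losses:
            print(f"Validation loss: {self.average(self.epoch_val_losses)}")
            self.val_losses.append(self.average(self.epoch_val_losses))
            self.epoch_val_losses = []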
class PANDATilesDataModule(pl.LightningModule):
def __init__(self, batch_size: int = 16, stage: str = "train"):
super().__init__()
self.batch_size = batch_size
self.data_dir = (
"/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiles/tiles"
)
# self.transform = transforms.Compose([transforms.ToTensor()])
train_csv = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/train.csv"
)
if stage == "train":
self.data = train_csv[:9000]
elif stage == "val":
self.data = train_csv[9000:]
elif stage == "test":
self.data = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/test.csv"
)
else:
print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test")
self.data = train_csv
# Returns the image and ISUP label
def __getitem__(self, idx):
item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image
item_dir = os.path.join(self.data_dir, item_name + "/tile_0.png")
return (
torchvision.io.read_image(item_dir),
self.data.iloc[idx].loc["isup_grade"],
)
def __len__(self):
return len(self.data)
class PANDADataModule(pl.LightningModule):
def __init__(self, batch_size: int = 16, stage: str = "train"):
super().__init__()
self.batch_size = batch_size
self.data_dir = "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiled_images/tiled_images"
# self.transform = transforms.Compose([transforms.ToTensor()])
train_csv = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/train.csv"
)
if stage == "train":
self.data = train_csv[:9000]
elif stage == "val":
self.data = train_csv[9000:]
elif stage == "test":
self.data = pd.read_csv(
"/kaggle/input/prostate-cancer-grade-assessment/test.csv"
)
else:
print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test")
self.data = train_csv
# Returns the image and ISUP label
def __getitem__(self, idx):
item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image
item_dir = os.path.join(self.data_dir, item_name + "_tiled.png")
return (
torchvision.io.read_image(item_dir),
self.data.iloc[idx].loc["isup_grade"],
)
def __len__(self):
return len(self.data)
batch_size = 16
n_epochs = 2
train_data = PANDATilesDataModule(stage="train")  # pass stage by keyword; the first positional argument is batch_size
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=batch_size, shuffle=True
)
val_data = PANDATilesDataModule(stage="val")  # a positional "val" would be taken as batch_size and the split would silently stay on the training rows
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False)
EfficientNet = EfficientNetModule()
train_data.__getitem__(0)[0].shape
summary(EfficientNet, (3, 256, 256))
# trainer = pl.Trainer(limit_train_batches=100, max_epochs=n_epochs, accelerator='gpu')
# trainer.fit(model=EfficientNet, train_dataloaders=train_loader, val_dataloaders=val_loader)
| false | 2 | 1,529 | 0 | 1,664 | 1,529 |
||
129553544
|
<jupyter_start><jupyter_text>Parkinson's Disease Dataset
### Context
Try finding the reasons for Parkinsons disease and predict who might have it next!
Kaggle dataset identifier: parkinsonsdataset
<jupyter_script># Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing The Libraries
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
# importing the dataset
df = pd.read_csv("/kaggle/input/parkinsonsdataset/parkinsons.csv")
# # Analysis of Data
df.head()
df.tail()
X = df.drop(["name", "status"], axis=1) # name not needed
Y = df["status"] # output variable
# checking for any irregularities, unwanted values, or empty entries
df.info()
df.describe() # different central tendencies of the data
df.isnull().sum() # empty values
# There are no empty variables in the data
features = [feature for feature in X]
features
# checking for any outliers in the data
for feature in features:
sns.boxplot(x="status", y=feature, data=df)
plt.show()
# Some of the above data have outliers so we need to take steps for that
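# A hedged sketch (not part of the original notebook) of one common way to handle outliers:
# clip each feature to its interquartile-range fence.
def clip_outliers_iqr(frame, columns):
    clipped = frame.copy()
    for col in columns:
        q1, q3 = clipped[col].quantile([0.25, 0.75])
        iqr = q3 - q1
        clipped[col] = clipped[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)
    return clipped
# Example usage: X_clipped = clip_outliers_iqr(X, features)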
# heatmap
correlation = df.corr()
sns.heatmap(correlation)
# # Splitting Training Set and Test
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# # Model
model = ExtraTreesClassifier()
model.fit(X, Y)
random_features = pd.Series(model.feature_importances_, index=X.columns)
random_features.nlargest(4).plot(kind="barh")
selected_feat = ["PPE", "MDVP:Fo(Hz)", "spread1", "spread2"]
# putting only the selected features from the training set and test set
X_train = X_train[selected_feat].values
X_test = X_test[selected_feat].values
y_train = y_train.values
y_test = y_test.values
# scaling the data
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# **XGBoost Analysis**
# using the xgboost model in the data
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
y_pred1 = xgb.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred1)
print(cm1)
print(accuracy_score(y_test, y_pred1))
print(accuracy_score(y_train, xgb.predict(X_train)))
from sklearn.model_selection import KFold, cross_val_score
kf = KFold(10)
results = cross_val_score(xgb, X, Y, cv=kf)
np.mean(results)
# **Random Forest Analysis**
# using random forest for classification
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
y_pred2 = rf.predict(X_test)
cm2 = confusion_matrix(y_test, y_pred2)
print(cm2)
print(accuracy_score(y_test, y_pred2))
print(accuracy_score(y_train, rf.predict(X_train)))
kf = KFold(10)
results = cross_val_score(rf, X, Y, cv=kf)
np.mean(results)
# **Logistic Regression**
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
print(accuracy_score(y_train, lr.predict(X_train)))
kf = KFold(10)
results = cross_val_score(lr, X, Y, cv=kf)  # cross-validate the logistic regression model (the original reused rf here)
np.mean(results)
# **K Nearest Neighbours**
knn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
print(accuracy_score(y_train, knn.predict(X_train)))
kf = KFold(10)
results = cross_val_score(knn, X, Y, cv=kf)
np.mean(results)
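# A hedged sketch (not part of the original notebook): the cross_val_score calls above run on the
# raw, unscaled full feature set, while the held-out scores used scaled, selected features.
# Wrapping the scaler in a Pipeline keeps the preprocessing inside each fold:
from sklearn.pipeline import Pipeline
pipe = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("knn", KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)),
    ]
)
print(cross_val_score(pipe, X[selected_feat], Y, cv=KFold(10)).mean())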
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553544.ipynb
|
parkinsonsdataset
|
gargmanas
|
[{"Id": 129553544, "ScriptId": 36429996, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12843112, "CreationDate": "05/14/2023 19:18:04", "VersionNumber": 10.0, "Title": "Parkinsons Disease Prediction", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 46.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 106.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185731245, "KernelVersionId": 129553544, "SourceDatasetVersionId": 2172562}]
|
[{"Id": 2172562, "DatasetId": 1304147, "DatasourceVersionId": 2213865, "CreatorUserId": 4631534, "LicenseName": "GNU Free Documentation License 1.3", "CreationDate": "04/29/2021 08:15:50", "VersionNumber": 1.0, "Title": "Parkinson's Disease Dataset", "Slug": "parkinsonsdataset", "Subtitle": "Use the dataset to analyze it and detect Parkinson's disease", "Description": "### Context\n\nTry finding the reasons for Parkinsons disease and predict who might have it next!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1304147, "CreatorUserId": 4631534, "OwnerUserId": 4631534.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2172562.0, "CurrentDatasourceVersionId": 2213865.0, "ForumId": 1322920, "Type": 2, "CreationDate": "04/29/2021 08:15:50", "LastActivityDate": "04/29/2021", "TotalViews": 15563, "TotalDownloads": 1911, "TotalVotes": 67, "TotalKernels": 15}]
|
[{"Id": 4631534, "UserName": "gargmanas", "DisplayName": "SHINIGAMI", "RegisterDate": "03/08/2020", "PerformanceTier": 3}]
|
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing The Libraries
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
# importing the dataset
df = pd.read_csv("/kaggle/input/parkinsonsdataset/parkinsons.csv")
# # Analysis of Data
df.head()
df.tail()
X = df.drop(["name", "status"], axis=1) # name not needed
Y = df["status"] # output variable
# checking for any irregularities, unwanted values, or empty entries
df.info()
df.describe() # different central tendencies of the data
df.isnull().sum() # empty values
# There are no empty variables in the data
features = [feature for feature in X]
features
# checking for any outliers in the data
for feature in features:
sns.boxplot(x="status", y=feature, data=df)
plt.show()
# Some of the above data have outliers so we need to take steps for that
# heatmap
correlation = df.corr()
sns.heatmap(correlation)
# # Splitting Training Set and Test
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
# # Model
model = ExtraTreesClassifier()
model.fit(X, Y)
random_features = pd.Series(model.feature_importances_, index=X.columns)
random_features.nlargest(4).plot(kind="barh")
selected_feat = ["PPE", "MDVP:Fo(Hz)", "spread1", "spread2"]
# putting only the selected features from the training set and test set
X_train = X_train[selected_feat].values
X_test = X_test[selected_feat].values
y_train = y_train.values
y_test = y_test.values
# scaling the data
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# **XGBoost Analysis**
# using the xgboost model in the data
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
y_pred1 = xgb.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred1)
print(cm1)
print(accuracy_score(y_test, y_pred1))
print(accuracy_score(y_train, xgb.predict(X_train)))
from sklearn.model_selection import KFold, cross_val_score
kf = KFold(10)
results = cross_val_score(xgb, X, Y, cv=kf)
np.mean(results)
# **Random Forest Analysis**
# using random forest for classification
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
y_pred2 = rf.predict(X_test)
cm2 = confusion_matrix(y_test, y_pred2)
print(cm2)
print(accuracy_score(y_test, y_pred2))
print(accuracy_score(y_train, rf.predict(X_train)))
kf = KFold(10)
results = cross_val_score(rf, X, Y, cv=kf)
np.mean(results)
# **Logistic Regression**
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
print(accuracy_score(y_train, lr.predict(X_train)))
kf = KFold(10)
results = cross_val_score(lr, X, Y, cv=kf)  # cross-validate the logistic regression model (the original reused rf here)
np.mean(results)
# **K Nearest Neighbours**
knn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
print(accuracy_score(y_train, knn.predict(X_train)))
kf = KFold(10)
results = cross_val_score(knn, X, Y, cv=kf)
np.mean(results)
| false | 1 | 1,307 | 0 | 1,355 | 1,307 |
||
129553823
|
<jupyter_start><jupyter_text>GitHub Repositories 2020
## Context
**You can star repositories to keep track of projects you find interesting.**
I have Scraped top stared repositories from GitHub with different topics. I have used Python BeautifulSoup to scrape the data. The main motivation behind this data is to analyze top GitHub stared repositories.
I have selected some topics like Data-Science, Machine-Learning, Computer-Vision, etc. Then I have watched most stared 100 repository details including repository commits, issue, fork, etc.
## GitHub
## Content
There are more than **1000** repository nformation.
Data contains the main 19 columns:
1) **topic**: A base word with the help of its fetched repository.
2) **name**: repository name.
3) **user**: repository user name.
4) **star**: stars are given by users.
5) **fork**: number of the fork that specific repository.
6) **watch**: repository watch
7) **issue**: number of issue in that repository.
8) **pull_requests**: number of pull requests
9) **projects**: a number of projects undergoing that topic_tag.
10) **topic_tag**: tag added to the repository by the user.
11) **discription_text**: short discription added by user.
12) **discription_url**: additional url provide by repository.
13) **commits**: number of commits to that repository.
14) **branches**: a number of different branches of the repository.
15) **packages**: number of packages.
16) **releases**: releases of the repository.
17) **contributors**: a number of users have contributed to the repository.
18) **License**: name of License.
19) **url**: URL of the repository.
**current repository topics**: Data-Science, Machine-Learning, Open-CV, Computer-Vision, GAN, variational-encoder, Android-studio, flutter, JAVA, awesome, javascript, c++
**stay tuned for more topics.**
Kaggle dataset identifier: github-repositories-analysis
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Here, I have done the `Data Analysis` part for popular GitHub repositories.
import pandas as pd
data = pd.read_csv("/kaggle/input/github-repositories-analysis/Github_data.csv")
data.sample(5)
del_list = ["Unnamed: 0", "Unnamed: 0.1"]
data.drop(del_list, axis=1, inplace=True)
data.sample(5)
data.head()
data.info()
oolumns_name = data.columns
oolumns_name
# Rename the columns
new_column = [
"Topic",
"Name",
"User",
"Star",
"Fork",
"Watch",
"Issue",
"Pull_requests",
"Projects",
"Topic_tag",
"Discription_text",
"Discription_url",
"Commits",
"Branches",
"Packages",
"Releases",
"Contributers",
"License",
"Url",
]
data = data.rename(columns=dict(zip(oolumns_name, new_column)))
data.head(5)
data["Topic"].value_counts().plot(kind="bar")
data["License"].value_counts()
data[data["License"] == "BSD-3-Clause"]
data.info()
data["Star"] = data["Star"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data["Fork"] = data["Fork"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data["Watch"] = data["Watch"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data.head(5)
data["Issue"] = data["Issue"].apply(lambda x: x.replace(",", "") if "," in x else x)
data["Commits"] = data["Commits"].apply(lambda x: x.replace(",", "") if "," in x else x)
data.head()
data.info()
cols = ["Issue", "Pull_requests", "Commits", "Contributers"]
data[cols] = data[cols].apply(pd.to_numeric, errors="coerce", axis=1)
data.dtypes
data.describe()
new_data = data.groupby("Topic").mean().reset_index()
new_data
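# Note (assumption about the pandas version in use): on pandas >= 2.0, mean() raises on the
# remaining string columns, so the same aggregation would need numeric_only=True, e.g.
# new_data = data.groupby("Topic").mean(numeric_only=True).reset_index()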
# ## Which repo and topic has highest no. of stars?
star_df = new_data.sort_values(by=["Star"], ascending=False)
star_df
import matplotlib.pyplot as plt
plt.bar(star_df["Topic"], star_df["Star"], color="blue", width=0.4)
plt.xticks(rotation=90)
star_data = data.sort_values(by=["Star"], ascending=False)
star_data
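# A small follow-up sketch (not in the original notebook) that answers the question for a single
# repository directly, using the already-sorted frame:
print(star_data.iloc[0][["Name", "User", "Topic", "Star"]])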
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553823.ipynb
|
github-repositories-analysis
|
vatsalparsaniya
|
[{"Id": 129553823, "ScriptId": 38511450, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11869129, "CreationDate": "05/14/2023 19:21:36", "VersionNumber": 1.0, "Title": "Data Analysis of Github Repo", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185731806, "KernelVersionId": 129553823, "SourceDatasetVersionId": 1106182}]
|
[{"Id": 1106182, "DatasetId": 619495, "DatasourceVersionId": 1136340, "CreatorUserId": 2907842, "LicenseName": "Unknown", "CreationDate": "04/24/2020 20:21:04", "VersionNumber": 1.0, "Title": "GitHub Repositories 2020", "Slug": "github-repositories-analysis", "Subtitle": "GitHub Top stared Repositories of specific Domain (1200+)", "Description": "## Context \n\n**You can star repositories to keep track of projects you find interesting.**\nI have Scraped top stared repositories from GitHub with different topics. I have used Python BeautifulSoup to scrape the data. The main motivation behind this data is to analyze top GitHub stared repositories.\n\nI have selected some topics like Data-Science, Machine-Learning, Computer-Vision, etc. Then I have watched most stared 100 repository details including repository commits, issue, fork, etc.\n \n## GitHub \n\n## Content\nThere are more than **1000** repository nformation.\n\nData contains the main 19 columns:\n1) **topic**: A base word with the help of its fetched repository.\n2) **name**: repository name.\n3) **user**: repository user name.\n4) **star**: stars are given by users.\n5) **fork**: number of the fork that specific repository.\n6) **watch**: repository watch\n7) **issue**: number of issue in that repository.\n8) **pull_requests**: number of pull requests \n9) **projects**: a number of projects undergoing that topic_tag.\n10) **topic_tag**: tag added to the repository by the user.\n11) **discription_text**: short discription added by user.\n12) **discription_url**: additional url provide by repository.\n13) **commits**: number of commits to that repository.\n14) **branches**: a number of different branches of the repository.\n15) **packages**: number of packages.\n16) **releases**: releases of the repository.\n17) **contributors**: a number of users have contributed to the repository.\n18) **License**: name of License.\n19) **url**: URL of the repository.\n\n**current repository topics**: Data-Science, Machine-Learning, Open-CV, Computer-Vision, GAN, variational-encoder, Android-studio, flutter, JAVA, awesome, javascript, c++\n\n**stay tuned for more topics.**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 619495, "CreatorUserId": 2907842, "OwnerUserId": 2907842.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1106182.0, "CurrentDatasourceVersionId": 1136340.0, "ForumId": 633624, "Type": 2, "CreationDate": "04/24/2020 20:21:04", "LastActivityDate": "04/24/2020", "TotalViews": 7978, "TotalDownloads": 545, "TotalVotes": 20, "TotalKernels": 7}]
|
[{"Id": 2907842, "UserName": "vatsalparsaniya", "DisplayName": "Vatsal Parsaniya", "RegisterDate": "03/07/2019", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Here, I have done the `Data Analysis` part for popular GitHub repositories.
import pandas as pd
data = pd.read_csv("/kaggle/input/github-repositories-analysis/Github_data.csv")
data.sample(5)
del_list = ["Unnamed: 0", "Unnamed: 0.1"]
data.drop(del_list, axis=1, inplace=True)
data.sample(5)
data.head()
data.info()
oolumns_name = data.columns
oolumns_name
# Rename the columns
new_column = [
"Topic",
"Name",
"User",
"Star",
"Fork",
"Watch",
"Issue",
"Pull_requests",
"Projects",
"Topic_tag",
"Discription_text",
"Discription_url",
"Commits",
"Branches",
"Packages",
"Releases",
"Contributers",
"License",
"Url",
]
data = data.rename(columns=dict(zip(oolumns_name, new_column)))
data.head(5)
data["Topic"].value_counts().plot(kind="bar")
data["License"].value_counts()
data[data["License"] == "BSD-3-Clause"]
data.info()
data["Star"] = data["Star"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data["Fork"] = data["Fork"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data["Watch"] = data["Watch"].apply(
lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x)
)
data.head(5)
data["Issue"] = data["Issue"].apply(lambda x: x.replace(",", "") if "," in x else x)
data["Commits"] = data["Commits"].apply(lambda x: x.replace(",", "") if "," in x else x)
data.head()
data.info()
cols = ["Issue", "Pull_requests", "Commits", "Contributers"]
data[cols] = data[cols].apply(pd.to_numeric, errors="coerce", axis=1)
data.dtypes
data.describe()
new_data = data.groupby("Topic").mean().reset_index()
new_data
# ## Which repo and topic has highest no. of stars?
star_df = new_data.sort_values(by=["Star"], ascending=False)
star_df
import matplotlib.pyplot as plt
plt.bar(star_df["Topic"], star_df["Star"], color="blue", width=0.4)
plt.xticks(rotation=90)
star_data = data.sort_values(by=["Star"], ascending=False)
star_data
| false | 1 | 869 | 0 | 1,349 | 869 |
||
129553542
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
url = "/kaggle/input/demand-forecasting-kernels-only/train.csv"
df_train = pd.read_csv(url)
df_train["date"] = pd.to_datetime(df_train["date"])
df_train.head()
# load test set
url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv"
df_test = pd.read_csv(url2)
df_test["date"] = pd.to_datetime(df_test["date"])
df_test.head()
# Concatenate the training and testing dataframes
df_combined = pd.concat([df_train, df_test]).reset_index(drop=True)
import plotly.express as px
# Downsample the data by month and calculate the mean sales for each month
df_downsampled = df_train.resample("M", on="date").mean()
# Create a line plot using Plotly Express
fig = px.line(
df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time"
)
# Display the plot
fig.show()
# Calculate SMAPE
def smape(preds, target):
n = len(preds)
masked_arr = ~((preds == 0) & (target == 0))
preds, target = preds[masked_arr], target[masked_arr]
num = np.abs(preds - target)
denom = np.abs(preds) + np.abs(target)
smape_val = (200 * np.sum(num / denom)) / n
return smape_val
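# Quick sanity check of the metric (not part of the original notebook): identical series score 0,
# and a constant 10% over-forecast scores roughly 9.52.
print(smape(np.array([110.0, 220.0]), np.array([100.0, 200.0])))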
import pandas as pd
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# Feature Engineering - Categorical
df_combined["day_of_week"] = df_combined["date"].dt.dayofweek
df_combined["month"] = df_combined["date"].dt.month
df_combined["year"] = df_combined["date"].dt.year
# df['week'] = df['date'].dt.week
df_combined["day_of_year"] = df_combined["date"].dt.dayofyear
df_combined["week_of_year"] = df_combined["date"].dt.weekofyear
# df_combined['sin_day_of_week'] = np.sin(2*np.pi*df_combined['day_of_week']/7)
# df_combined['cos_day_of_week'] = np.cos(2*np.pi*df_combined['day_of_week']/7)
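# A hedged alternative (sketch only, not applied below) to the one-hot dummies: encode cyclic
# calendar features as sine/cosine pairs so that, e.g., December and January end up numerically close.
# df_combined["sin_month"] = np.sin(2 * np.pi * df_combined["month"] / 12)
# df_combined["cos_month"] = np.cos(2 * np.pi * df_combined["month"] / 12)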
# Encode categorical features
le_item = LabelEncoder()
le_store = LabelEncoder()
df_combined["item"] = le_item.fit_transform(df_combined["item"])
df_combined["store"] = le_store.fit_transform(df_combined["store"])
# Create dummy variables for day_of_week
day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week")
df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1)
# create a new dataframe to hold the dummy variables
# Create dummy variables for month
month_dummies = pd.get_dummies(df_combined["month"], prefix="month")
df_combined = pd.concat([df_combined, month_dummies], axis=1)
# Create dummy variables for year
year_dummies = pd.get_dummies(df_combined["year"], prefix="year")
df_combined = pd.concat([df_combined, year_dummies], axis=1)
# # Drop rows with NaN values
# df = df.dropna()
df_combined = df_combined.drop(
["month", "year", "day_of_year", "week_of_year", "day_of_week"], axis=1
)
# Separate your training and testing dataframes again
df_train = df_combined[df_combined["sales"].notna()]
df_test = df_combined[df_combined["sales"].isna()]
column_list = df_combined.columns.tolist()
print(column_list)
df_train = df_train.drop("id", axis=1)
df_train = df_train.dropna()
df_train
from sklearn.model_selection import TimeSeriesSplit
from lightgbm import LGBMRegressor
# Number of splits
n_splits = 5
# Initialize TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=n_splits)
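# A quick illustration (not part of the original notebook): TimeSeriesSplit yields expanding
# training windows, and each validation window starts strictly after its training window ends.
for fold, (tr_idx, va_idx) in enumerate(tscv.split(df_train)):
    print(f"fold {fold}: train [{tr_idx.min()}, {tr_idx.max()}] -> val [{va_idx.min()}, {va_idx.max()}]")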
model = LGBMRegressor()
df_fc = df_train.copy()
smape_values = []
# Perform cross-validation
for train_index, test_index in tscv.split(df_train):
CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index]
# Fit the model on the training data
model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"])
# Predict on the test data
predictions = model.predict(CV_test.drop(["sales", "date"], axis=1))
    df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions  # store the whole fold's predictions (the original kept only predictions[0])
# Calculate SMAPE and add it to the list of SMAPE values
smape_value = smape(CV_test["sales"].values, predictions)
smape_values.append(smape_value)
# Print the average SMAPE value across all folds
print("Average SMAPE: ", np.mean(smape_values)), smape_values
import plotly.express as px
import plotly.graph_objects as go  # go.Scatter is used below but was never imported in the original
# Downsample the data by month and calculate the mean sales for each month
df_downsampled = df_fc.resample("M", on="date").mean()
# Create a line plot using Plotly Express for sales
fig = px.line(
df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time"
)
# Add line for predictions
fig.add_trace(
go.Scatter(
x=df_downsampled.index, y=df_downsampled["predictions"], name="predictions"
)
)
# Display the plot
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553542.ipynb
| null | null |
[{"Id": 129553542, "ScriptId": 38522903, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11964784, "CreationDate": "05/14/2023 19:18:03", "VersionNumber": 1.0, "Title": "notebook57100693bb", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
url = "/kaggle/input/demand-forecasting-kernels-only/train.csv"
df_train = pd.read_csv(url)
df_train["date"] = pd.to_datetime(df_train["date"])
df_train.head()
# load test set
url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv"
df_test = pd.read_csv(url2)
df_test["date"] = pd.to_datetime(df_test["date"])
df_test.head()
# Concatenate the training and testing dataframes
df_combined = pd.concat([df_train, df_test]).reset_index(drop=True)
import plotly.express as px
# Downsample the data by month and calculate the mean sales for each month
df_downsampled = df_train.resample("M", on="date").mean()
# Create a line plot using Plotly Express
fig = px.line(
df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time"
)
# Display the plot
fig.show()
# Calculate SMAPE
def smape(preds, target):
n = len(preds)
masked_arr = ~((preds == 0) & (target == 0))
preds, target = preds[masked_arr], target[masked_arr]
num = np.abs(preds - target)
denom = np.abs(preds) + np.abs(target)
smape_val = (200 * np.sum(num / denom)) / n
return smape_val
import pandas as pd
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# Feature Engineering - Categorical
df_combined["day_of_week"] = df_combined["date"].dt.dayofweek
df_combined["month"] = df_combined["date"].dt.month
df_combined["year"] = df_combined["date"].dt.year
# df['week'] = df['date'].dt.week
df_combined["day_of_year"] = df_combined["date"].dt.dayofyear
df_combined["week_of_year"] = df_combined["date"].dt.weekofyear
# df_combined['sin_day_of_week'] = np.sin(2*np.pi*df_combined['day_of_week']/7)
# df_combined['cos_day_of_week'] = np.cos(2*np.pi*df_combined['day_of_week']/7)
# Encode categorical features
le_item = LabelEncoder()
le_store = LabelEncoder()
df_combined["item"] = le_item.fit_transform(df_combined["item"])
df_combined["store"] = le_store.fit_transform(df_combined["store"])
# Create dummy variables for day_of_week
day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week")
df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1)
# create a new dataframe to hold the dummy variables
# Create dummy variables for month
month_dummies = pd.get_dummies(df_combined["month"], prefix="month")
df_combined = pd.concat([df_combined, month_dummies], axis=1)
# Create dummy variables for year
year_dummies = pd.get_dummies(df_combined["year"], prefix="year")
df_combined = pd.concat([df_combined, year_dummies], axis=1)
# # Drop rows with NaN values
# df = df.dropna()
df_combined = df_combined.drop(
["month", "year", "day_of_year", "week_of_year", "day_of_week"], axis=1
)
# Separate your training and testing dataframes again
df_train = df_combined[df_combined["sales"].notna()]
df_test = df_combined[df_combined["sales"].isna()]
column_list = df_combined.columns.tolist()
print(column_list)
df_train = df_train.drop("id", axis=1)
df_train = df_train.dropna()
df_train
from sklearn.model_selection import TimeSeriesSplit
from lightgbm import LGBMRegressor
# Number of splits
n_splits = 5
# Initialize TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=n_splits)
model = LGBMRegressor()
df_fc = df_train.copy()
smape_values = []
# Perform cross-validation
for train_index, test_index in tscv.split(df_train):
CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index]
# Fit the model on the training data
model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"])
# Predict on the test data
predictions = model.predict(CV_test.drop(["sales", "date"], axis=1))
    df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions  # store the whole fold's predictions (the original kept only predictions[0])
# Calculate SMAPE and add it to the list of SMAPE values
smape_value = smape(CV_test["sales"].values, predictions)
smape_values.append(smape_value)
# Print the average SMAPE value across all folds
print("Average SMAPE: ", np.mean(smape_values)), smape_values
import plotly.express as px
import plotly.graph_objects as go  # go.Scatter is used below but was never imported in the original
# Downsample the data by month and calculate the mean sales for each month
df_downsampled = df_fc.resample("M", on="date").mean()
# Create a line plot using Plotly Express for sales
fig = px.line(
df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time"
)
# Add line for predictions
fig.add_trace(
go.Scatter(
x=df_downsampled.index, y=df_downsampled["predictions"], name="predictions"
)
)
# Display the plot
fig.show()
| false | 0 | 1,729 | 0 | 1,729 | 1,729 |
||
129553728
|
# Shorten the video into 1 minute
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
# Specify the start and end times for the clip you want to extract (in seconds)
start_time = 0
end_time = 60 # First minute
# Extract the subclip
ffmpeg_extract_subclip(
"/kaggle/input/gesturevideo/video_original.mp4",
start_time,
end_time,
targetname="video_cut.mp4",
)
# Process video to get openness, total movement, and leaning direction
import cv2
import mediapipe as mp
import numpy as np
from scipy.spatial import ConvexHull
import csv
# Initialize MediaPipe's Holistic module
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
# Function to calculate the Euclidean distance between two points
def euclidean_distance(p1, p2):
return np.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
# Function to calculate the openness of a pose
def pose_openness(holistic_landmarks):
keypoints = [
holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_SHOULDER],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_HIP],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_HIP],
]
coords = np.array([(kp.x, kp.y) for kp in keypoints])
hull = ConvexHull(coords)
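    # For 2-D points, ConvexHull.volume is the area of the shoulder/hip polygon,
    # used here as a simple proxy for how "open" the pose is.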
return hull.volume
# Function to calculate leaning direction
def leaning_direction(holistic_landmarks):
nose = holistic_landmarks.landmark[mp_holistic.PoseLandmark.NOSE]
left_shoulder = holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER]
right_shoulder = holistic_landmarks.landmark[
mp_holistic.PoseLandmark.RIGHT_SHOULDER
]
avg_shoulder_z = (left_shoulder.z + right_shoulder.z) / 2
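    # MediaPipe pose z is a relative depth (origin at the hip midpoint, smaller = closer to the
    # camera), so a nose in front of the shoulder plane is interpreted as leaning forward.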
if nose.z < avg_shoulder_z:
return "Forward"
else:
return "Backward"
# Load the video
video_path = "/kaggle/working/video_cut.mp4"
cap = cv2.VideoCapture(video_path)
# Get the video dimensions and FPS
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Initialize the VideoWriter
output_filename = "output_video.mp4"
fourcc = cv2.VideoWriter_fourcc(
*"mp4v"
) # You can also use "XVID" or "MJPG" for AVI files
# out = cv2.VideoWriter(output_filename, fourcc, fps, (width, height))
# Initialize variables
prev_landmarks = None
total_movement = 0
movement_threshold = (
0.001 # Adjust the threshold to fine-tune movement detection sensitivity
)
keypoints_to_track = [
mp_holistic.PoseLandmark.LEFT_WRIST,
mp_holistic.PoseLandmark.RIGHT_WRIST,
mp_holistic.PoseLandmark.LEFT_ANKLE,
mp_holistic.PoseLandmark.RIGHT_ANKLE,
]
# Initialize the output CSV File
csv_filename = "output_features.csv"
with open(csv_filename, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(
["video_name", "frame_number", "total_movement", "avg_pose_openness", "leaning"]
)
# Process the video frames
count = 0
frame_number = 0
with mp_holistic.Holistic(
static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5
) as holistic:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
        # process one frame per second (every fps-th frame)
if count % fps == 0:
# Convert the frame to RGB
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the frame with MediaPipe's Holistic module
results = holistic.process(frame_rgb)
# Draw holistic landmarks on the frame
if results.pose_landmarks:
mp_drawing.draw_landmarks(
frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS
)
# Calculate the total movement
if prev_landmarks:
frame_movement = 0
for kp in keypoints_to_track:
distance = euclidean_distance(
results.pose_landmarks.landmark[kp],
prev_landmarks.landmark[kp],
)
frame_movement += distance
if frame_movement > movement_threshold:
total_movement += frame_movement
prev_landmarks = results.pose_landmarks
# Calculate and display the total movement and pose openness on the frame
openness_value = pose_openness(results.pose_landmarks)
# Calculate and display the leaning direction
leaning_dir = leaning_direction(results.pose_landmarks)
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(
[video_path, count, total_movement, openness_value, leaning_dir]
)
frame_number += 1
count += 1
# cv2.putText(frame, f"Total Movement: {total_movement:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
# cv2.putText(frame, f"Pose Openness: {openness_value:.4f}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# cv2.putText(frame, f"Leaning: {leaning_dir}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Save the frame
# cv2.imwrite('frame' + str(count) + '.jpg', frame)
# out.write(frame)
# out.release()
# Process the video frames a second time, logging frame_number in the CSV instead of the raw count.
# The capture was fully consumed by the loop above, so rewind it and reset the counters first;
# otherwise cap.read() returns nothing and this pass writes no rows.
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
count = 0
frame_number = 0
with mp_holistic.Holistic(
static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5
) as holistic:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
# Process every fps'th frame
if count % fps == 0:
# Convert the frame to RGB
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the frame with MediaPipe's Holistic module
results = holistic.process(frame_rgb)
# Draw holistic landmarks on the frame
if results.pose_landmarks:
mp_drawing.draw_landmarks(
frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS
)
# Calculate the total movement
if prev_landmarks:
frame_movement = 0
for kp in keypoints_to_track:
distance = euclidean_distance(
results.pose_landmarks.landmark[kp],
prev_landmarks.landmark[kp],
)
frame_movement += distance
if frame_movement > movement_threshold:
total_movement += frame_movement
prev_landmarks = results.pose_landmarks
# Calculate and display the total movement and pose openness on the frame
openness_value = pose_openness(results.pose_landmarks)
# Calculate and display the leaning direction
leaning_dir = leaning_direction(results.pose_landmarks)
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(
[
video_path,
frame_number,
total_movement,
openness_value,
leaning_dir,
]
)
# Increase frame_number every time you process a frame
frame_number += 1
count += 1
import csv
import librosa
import numpy as np
from moviepy.editor import VideoFileClip
import speech_recognition as sr
# Load the video
video_path = "/kaggle/working/video_cut.mp4"
clip = VideoFileClip(video_path)
# Initialize the output CSV File
csv_filename = "output_audio_features.csv"
with open(csv_filename, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(["time", "avg_pitch", "avg_intensity", "transcription"])
# Initialize the speech recognizer
r = sr.Recognizer()
# Process the audio one second at a time
for i in range(int(clip.duration)):
# Extract one second of audio
audio_segment = clip.subclip(i, i + 1).audio
audio_segment.write_audiofile("temp_audio.wav")
# Load the audio file with librosa
y, sampling_rate = librosa.load("temp_audio.wav")
# Calculate pitch with librosa
pitches, magnitudes = librosa.piptrack(y=y, sr=sampling_rate)
# Calculate average pitch and intensity for this second
avg_pitch = pitches.mean()
avg_intensity = magnitudes.mean()
# Transcribe the audio with SpeechRecognition
with sr.AudioFile("temp_audio.wav") as source:
audio = r.record(source) # read the entire audio file
try:
transcription = r.recognize_google(audio)
except sr.UnknownValueError:
transcription = ""
except sr.RequestError as e:
print(
f"Could not request results from Google Speech Recognition service; {e}"
)
transcription = ""
# Write the features to the CSV
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow([i, avg_pitch, avg_intensity, transcription])
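# A hedged refinement (not in the original notebook): averaging every piptrack bin mixes in silent
# bins, so a per-frame dominant pitch is often a more meaningful summary. Sketch, assuming the
# same pitches/magnitudes arrays produced above:
def dominant_pitch(pitches, magnitudes):
    frame_idx = np.arange(pitches.shape[1])
    best_bins = magnitudes.argmax(axis=0)  # strongest frequency bin in each frame
    frame_pitches = pitches[best_bins, frame_idx]  # pitch of that bin per frame
    voiced = frame_pitches[frame_pitches > 0]  # drop unvoiced / silent frames
    return float(voiced.mean()) if voiced.size else 0.0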
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553728.ipynb
| null | null |
[{"Id": 129553728, "ScriptId": 38178405, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11926155, "CreationDate": "05/14/2023 19:20:17", "VersionNumber": 1.0, "Title": "practicum_holistic_and_audio_analysis", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 239.0, "LinesInsertedFromPrevious": 239.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Shorten the video into 1 minute
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
# Specify the start and end times for the clip you want to extract (in seconds)
start_time = 0
end_time = 60 # First minute
# Extract the subclip
ffmpeg_extract_subclip(
"/kaggle/input/gesturevideo/video_original.mp4",
start_time,
end_time,
targetname="video_cut.mp4",
)
# Process video to get openness, total movement, and leaning direction
import cv2
import mediapipe as mp
import numpy as np
from scipy.spatial import ConvexHull
import csv
# Initialize MediaPipe's Holistic module
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
# Function to calculate the Euclidean distance between two points
def euclidean_distance(p1, p2):
return np.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
# Function to calculate the openness of a pose
def pose_openness(holistic_landmarks):
keypoints = [
holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_SHOULDER],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_HIP],
holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_HIP],
]
coords = np.array([(kp.x, kp.y) for kp in keypoints])
hull = ConvexHull(coords)
return hull.volume
# Function to calculate leaning direction
def leaning_direction(holistic_landmarks):
nose = holistic_landmarks.landmark[mp_holistic.PoseLandmark.NOSE]
left_shoulder = holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER]
right_shoulder = holistic_landmarks.landmark[
mp_holistic.PoseLandmark.RIGHT_SHOULDER
]
avg_shoulder_z = (left_shoulder.z + right_shoulder.z) / 2
if nose.z < avg_shoulder_z:
return "Forward"
else:
return "Backward"
# Load the video
video_path = "/kaggle/working/video_cut.mp4"
cap = cv2.VideoCapture(video_path)
# Get the video dimensions and FPS
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Initialize the VideoWriter
output_filename = "output_video.mp4"
fourcc = cv2.VideoWriter_fourcc(
*"mp4v"
) # You can also use "XVID" or "MJPG" for AVI files
# out = cv2.VideoWriter(output_filename, fourcc, fps, (width, height))
# Initialize variables
prev_landmarks = None
total_movement = 0
movement_threshold = (
0.001 # Adjust the threshold to fine-tune movement detection sensitivity
)
keypoints_to_track = [
mp_holistic.PoseLandmark.LEFT_WRIST,
mp_holistic.PoseLandmark.RIGHT_WRIST,
mp_holistic.PoseLandmark.LEFT_ANKLE,
mp_holistic.PoseLandmark.RIGHT_ANKLE,
]
# Initialize the output CSV File
csv_filename = "output_features.csv"
with open(csv_filename, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(
["video_name", "frame_number", "total_movement", "avg_pose_openness", "leaning"]
)
# Process the video frames
count = 0
frame_number = 0
with mp_holistic.Holistic(
static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5
) as holistic:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
        # process one frame per second (every fps-th frame)
if count % fps == 0:
# Convert the frame to RGB
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the frame with MediaPipe's Holistic module
results = holistic.process(frame_rgb)
# Draw holistic landmarks on the frame
if results.pose_landmarks:
mp_drawing.draw_landmarks(
frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS
)
# Calculate the total movement
if prev_landmarks:
frame_movement = 0
for kp in keypoints_to_track:
distance = euclidean_distance(
results.pose_landmarks.landmark[kp],
prev_landmarks.landmark[kp],
)
frame_movement += distance
if frame_movement > movement_threshold:
total_movement += frame_movement
prev_landmarks = results.pose_landmarks
# Calculate and display the total movement and pose openness on the frame
openness_value = pose_openness(results.pose_landmarks)
# Calculate and display the leaning direction
leaning_dir = leaning_direction(results.pose_landmarks)
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(
[video_path, count, total_movement, openness_value, leaning_dir]
)
frame_number += 1
count += 1
# cv2.putText(frame, f"Total Movement: {total_movement:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
# cv2.putText(frame, f"Pose Openness: {openness_value:.4f}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# cv2.putText(frame, f"Leaning: {leaning_dir}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Save the frame
# cv2.imwrite('frame' + str(count) + '.jpg', frame)
# out.write(frame)
# out.release()
# Process the video frames a second time, logging frame_number in the CSV instead of the raw count.
# The capture was fully consumed by the loop above, so rewind it and reset the counters first;
# otherwise cap.read() returns nothing and this pass writes no rows.
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
count = 0
frame_number = 0
with mp_holistic.Holistic(
static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5
) as holistic:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
# Process every fps'th frame
if count % fps == 0:
# Convert the frame to RGB
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Process the frame with MediaPipe's Holistic module
results = holistic.process(frame_rgb)
# Draw holistic landmarks on the frame
if results.pose_landmarks:
mp_drawing.draw_landmarks(
frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS
)
# Calculate the total movement
if prev_landmarks:
frame_movement = 0
for kp in keypoints_to_track:
distance = euclidean_distance(
results.pose_landmarks.landmark[kp],
prev_landmarks.landmark[kp],
)
frame_movement += distance
if frame_movement > movement_threshold:
total_movement += frame_movement
prev_landmarks = results.pose_landmarks
# Calculate and display the total movement and pose openness on the frame
openness_value = pose_openness(results.pose_landmarks)
# Calculate and display the leaning direction
leaning_dir = leaning_direction(results.pose_landmarks)
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(
[
video_path,
frame_number,
total_movement,
openness_value,
leaning_dir,
]
)
# Increase frame_number every time you process a frame
frame_number += 1
count += 1
import csv
import librosa
import numpy as np
from moviepy.editor import VideoFileClip
import speech_recognition as sr
# Load the video
video_path = "/kaggle/working/video_cut.mp4"
clip = VideoFileClip(video_path)
# Initialize the output CSV File
csv_filename = "output_audio_features.csv"
with open(csv_filename, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(["time", "avg_pitch", "avg_intensity", "transcription"])
# Initialize the speech recognizer
r = sr.Recognizer()
# Process the audio one second at a time
for i in range(int(clip.duration)):
# Extract one second of audio
audio_segment = clip.subclip(i, i + 1).audio
audio_segment.write_audiofile("temp_audio.wav")
# Load the audio file with librosa
y, sampling_rate = librosa.load("temp_audio.wav")
# Calculate pitch with librosa
pitches, magnitudes = librosa.piptrack(y=y, sr=sampling_rate)
# Calculate average pitch and intensity for this second
avg_pitch = pitches.mean()
avg_intensity = magnitudes.mean()
# Transcribe the audio with SpeechRecognition
with sr.AudioFile("temp_audio.wav") as source:
audio = r.record(source) # read the entire audio file
try:
transcription = r.recognize_google(audio)
except sr.UnknownValueError:
transcription = ""
except sr.RequestError as e:
print(
f"Could not request results from Google Speech Recognition service; {e}"
)
transcription = ""
# Write the features to the CSV
with open(csv_filename, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow([i, avg_pitch, avg_intensity, transcription])
| false | 0 | 2,566 | 0 | 2,566 | 2,566 |
||
129553283
|
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.losses import mse
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
# **Load the MNIST dataset**
(x_train, _), (x_test, _) = mnist.load_data()
# **Normalize and flatten the images**
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# **Define the VAE architecture**
latent_dim = 2
# **Encoder network**
inputs = Input(shape=(784,))
enc_h1 = Dense(512, activation="relu")(inputs)
enc_h2 = Dense(256, activation="relu")(enc_h1)
z_mean = Dense(latent_dim)(enc_h2)
z_log_var = Dense(latent_dim)(enc_h2)
# **Latent space sampling function**
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(
shape=(K.shape(z_mean)[0], latent_dim), mean=0.0, stddev=1.0
)
return z_mean + K.exp(0.5 * z_log_var) * epsilon
# **Reparameterization trick**
z = Lambda(sampling)([z_mean, z_log_var])
# **Decoder network**
dec_h1 = Dense(256, activation="relu")(z)
dec_h2 = Dense(512, activation="relu")(dec_h1)
outputs = Dense(784, activation="sigmoid")(dec_h2)
# **Define the VAE model**
vae = Model(inputs, outputs)
# **VAE loss function**
reconstruction_loss = mse(inputs, outputs)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
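# Note (common practice, not from the original notebook): the per-pixel MSE above is often scaled
# by the input dimension so the reconstruction term is not dwarfed by the KL term, e.g.
# reconstruction_loss = 784 * mse(inputs, outputs)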
# **Compile the VAE model**
vae.compile(optimizer="adam")
# **Train the VAE model**
history = vae.fit(x_train, epochs=50, batch_size=128, validation_data=(x_test, None))
# **Reconstruct images using the VAE model**
n = 10  # Number of images to display
# The original passed the reconstructions through the network a second time; a single forward
# pass already yields the reconstructed test images.
decoded_imgs = vae.predict(x_test)
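# A hedged sketch (not part of the original notebook) of sampling brand-new digits from the prior.
# Assumption: the decoder consists of the Dense layers that appear after the sampling Lambda in
# vae.layers, in order (Dense(256) -> Dense(512) -> Dense(784, sigmoid)).
lambda_index = next(i for i, layer in enumerate(vae.layers) if isinstance(layer, Lambda))
decoder_input = Input(shape=(latent_dim,))
h = decoder_input
for layer in vae.layers[lambda_index + 1 :]:
    if isinstance(layer, Dense):  # skip any bookkeeping layers Keras inserted for add_loss
        h = layer(h)
generator = Model(decoder_input, h)
sampled_digits = generator.predict(np.random.normal(size=(n, latent_dim)))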
# **Visualize the generated images**
plt.figure(figsize=(20, 4))
for i in range(n):
# Display original images
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28), cmap="gray")
plt.title("Original")
plt.axis("off")
# Display reconstructed images
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28), cmap="gray")
plt.title("Generated")
plt.axis("off")
plt.show()
# Exploratory Data Analysis On Netflix Dataset in Python
# ***1. Importing the required libraries and dataset for EDA***
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
plt.style.use("fivethirtyeight")
plt.rcParams["figure.figsize"] = (10, 6)
data = pd.read_csv("/kaggle/input/datanetflix/netflix.csv")
data.sample(10)
# ***2. Display the columns of the data***
data.columns
# ***3. Show the column types***
data.dtypes
# ***4. Show the size of the data***
data.shape
# ***5. Dropping the duplicate rows***
duplicate_rows_data = data[data.duplicated()]
print("number of duplicate rows: ", duplicate_rows_data.shape)
data.count()
data = data.drop_duplicates()
data.head(5)
# ***6. Renaming the columns***
data = data.rename(columns={"listed_in": "categorie "})
data.head(5)
# ***7. Dropping the missing or null values***
data.count()
print(data.isnull().sum())
data = data.dropna()
print(data.isnull().sum())
# **The dataset has now been cleaned before analysis, so that the results obtained are reliable, relevant and consistent with reality.**
# ***QST:1 Top 5 categories.***
#
new_data = {key: data[key] for key in ["show_id", "categorie "]}
# Build a DataFrame from the selected columns (no loop is needed for this)
df = pd.DataFrame(new_data)
# Display the first entries of the DataFrame
DFF = df[0:5]
DFF
# ***Next we count the number of films and series in each category, to characterise the categories better, and display them in a histogram of the number of titles per category***
# Count the number of titles per category
# 'strip()' removes the specified characters from the edges of the string.
categories = new_data["categorie "].str.split(", ")
category_counts = {}
for movie_categories in categories:
for category in movie_categories:
if category.strip() in category_counts:
category_counts[category.strip()] += 1
else:
category_counts[category.strip()] = 1
# Plot the histogram
plt.bar(category_counts.keys(), category_counts.values())
plt.xticks(rotation=90)
plt.xlabel("Film category")
plt.ylabel("Number of titles")
plt.show()
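# A more compact pandas alternative for the same counts (a sketch using the columns above):
# split the comma-separated categories and explode them into one row per category.
category_counts_pd = data["categorie "].str.split(", ").explode().str.strip().value_counts()
print(category_counts_pd.head(5))  # top 5 categories, answering QST:1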
# ***QST:2 Top 5 directors.***
#
filtered_directors = pd.DataFrame()
filtered_directors = data["director"].str.split(",", expand=True).stack()
filtered_directors = filtered_directors.to_frame()
filtered_directors.columns = ["Director"]
directors = (
filtered_directors.groupby(["Director"]).size().reset_index(name="Total Content")
)
directors = directors[directors.Director != "No Director Specified"]
directors = directors.sort_values(by=["Total Content"], ascending=False)
directorsTop5 = directors.head(5)
directorsTop5 = directorsTop5.sort_values(by=["Total Content"])
fig1 = px.bar(
directorsTop5, x="Director", y="Total Content", title="Top 5 Directors on Netflix"
)
fig1.show()
# Create a dictionary to store the number of appearances of each actor
actors_count = {}
# Go through each entry of the cast column
for cast_list in data["cast"]:
    # Split the string of actor names into a list of individual actors
    actors = cast_list.split(", ")
    # Go through each actor in the list
    for actor in actors:
        # Add 1 to the actor's entry in actors_count, or create a new entry with value 1 if the actor has not been seen yet
        actors_count[actor] = actors_count.get(actor, 0) + 1
# Sort the dictionary entries by decreasing number of appearances and keep the top 10
top_actors = sorted(actors_count.items(), key=lambda x: x[1], reverse=True)[:10]
# Create one list for the actor names and another for the corresponding number of appearances
actor_names = []
actor_counts = []
for actor, count in top_actors:
    actor_names.append(actor)
    actor_counts.append(count)
plt.barh(actor_names, actor_counts)
# Add a title to the chart
plt.title("Top 10 most frequent actors")
# Add a label to the x-axis (barh puts the counts on the horizontal axis)
plt.xlabel("Number of appearances")
# Display the chart
plt.show()
# ***QST:3 The five TV shows with the largest number of seasons***
# ***Keep only the rows that are TV shows***
tv_shows = data.loc[data["type"] == "TV Show"]
tv_shows
# ***Then display the five shows with the longest duration***
# "duration" is a string such as "2 Seasons", so a plain lexicographic sort is misleading;
# extract the numeric part and sort on that instead.
tv_shows = data[data.type == "TV Show"].copy()
tv_shows["num_seasons"] = tv_shows["duration"].str.extract(r"(\d+)", expand=False).astype(int)
tv_shows_sorted = tv_shows.sort_values(by="num_seasons", ascending=False)
tv_shows_sorted.head(5)
# ***QST:5 Does Netflix focus more on TV shows than on movies in recent years?***
# **For this question we count the number of movies and TV shows in the dataset and then look at the result for each variable**
# value_counts() gives the same counts as iterating row by row, and is the idiomatic way to do it
type_counts = data["type"].value_counts()
nbr_tv_shows = type_counts.get("TV Show", 0)
nbr_movies = type_counts.get("Movie", 0)
print("number of TV shows:", nbr_tv_shows)
print("number of movies:", nbr_movies)
# ***Finally, to visualise the result better, we display it in a pie chart***
sizes = [nbr_tv_shows, nbr_movies]
labels = ["Tv_shows", "Movies"]
# Create the pie chart
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=90)
ax1.axis("equal")  # so that the circle is perfectly round
# Display the chart
plt.show()
# # Imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import cv2
# Data Directories
ROOT_DIR = "/kaggle/input/airbus-ship-detection"
train_image_dir = os.path.join(ROOT_DIR, "train_v2")
test_image_dir = os.path.join(ROOT_DIR, "test_v2")
sample_submission_dir = os.path.join(
    ROOT_DIR, "train_ship_segmentations_v2.csv"
)  # note: this points at the segmentation CSV, not an actual sample-submission file
train_ship_segmentations_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv")
# Data Loading
train = os.listdir(train_image_dir)
test = os.listdir(test_image_dir)
sample_submission_df = pd.read_csv(sample_submission_dir)
train_ship_segmentations_df = pd.read_csv(train_ship_segmentations_dir)
# # Data View
first_train_image_path = os.path.join(train_image_dir, train[0])
first_train_image = cv2.imread(first_train_image_path)
first_train_image = cv2.cvtColor(first_train_image, cv2.COLOR_BGR2RGB)
print(f"{first_train_image.shape = }\n")
plt.title("First Train Image")
plt.imshow(first_train_image)
train_ship_segmentations_df.head(10)
num_of_total_images = train_ship_segmentations_df.ImageId.nunique()
not_empty = pd.notna(train_ship_segmentations_df.EncodedPixels)
num_of_empty_images = (~not_empty).sum()  # rows (images) with no ship mask
num_of_mask_rows = not_empty.sum()  # rows with a mask, i.e. one per individual ship
num_of_images_with_masks = train_ship_segmentations_df[not_empty].ImageId.nunique()
print(
    f"{num_of_total_images = } | {num_of_empty_images = } | {num_of_mask_rows = } | {num_of_images_with_masks = }"
)
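# The EncodedPixels column stores each ship mask as a run-length encoding. A decoding sketch,
# assuming the competition's usual convention (1-indexed start positions, column-major order,
# 768x768 images):
def rle_decode(mask_rle, shape=(768, 768)):
    """Turn an RLE string into a binary mask; returns an all-zero mask for NaN (no ship)."""
    if pd.isna(mask_rle):
        return np.zeros(shape, dtype=np.uint8)
    s = mask_rle.split()
    starts = np.asarray(s[0::2], dtype=int) - 1  # RLE positions are 1-indexed
    lengths = np.asarray(s[1::2], dtype=int)
    mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, length in zip(starts, lengths):
        mask[start : start + length] = 1
    return mask.reshape(shape).T  # transpose because the encoding runs down columns first


first_mask_row = train_ship_segmentations_df[not_empty].iloc[0]
plt.figure()
plt.title(f"Decoded mask for {first_mask_row.ImageId}")
plt.imshow(rle_decode(first_mask_row.EncodedPixels), cmap="gray")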
import math
import bisect
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skew, kurtosis
# Regressors and classifiers
from xgboost import XGBRegressor
from sklearn.ensemble import (
RandomForestClassifier,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.linear_model import Ridge
from catboost import CatBoostRegressor
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
accuracy_score,
make_scorer,
)
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import StandardScaler
from sklearn.impute import KNNImputer
import category_encoders as ce
ONE = 1.123456789  # offset added when shifting skewed columns before the log transform, so log() never sees zero
# # Loading Dataframes
# Dataset taken from Kaggle competition: Prediction of Wild Blueberry Yield.
dir_input = "../input/playground-series-s3e14"
dir_output = "../working"
out_prefix = dir_output + "/playground-series-s3e14"
dir_train_csv = dir_input + "/train.csv"
dir_test_csv = dir_input + "/test.csv"
pd.read_csv(dir_train_csv)
df_train = pd.read_csv(dir_train_csv)
target = "yield"
df_target = df_train[[target]]
df_train = df_train.drop(columns=[target])
df_test = pd.read_csv(dir_test_csv)
df = pd.concat([df_train, df_test])
df.index = df["id"].values
df = df.drop(columns=["id"])
idx_train = df.index[: len(df_train)]
idx_pred = df.index[len(df_train) :]
df_target.index = idx_train
# df : contains both training and testing data without the target column.
# to be used for applying the same preprocessing to both training and testing data.
# df_target : target column of the training data.
# idx_train : indices of all the training samples.
# idx_pred : indices of the samples from the testing data.
print("No. of training samples:", len(idx_train))
print("No. of testing samples:", len(idx_pred))
df
# # Imputing Missing Values
df.isnull().sum()
# Nothing to do here.
# # Detecting Outliers
df_out = df.copy()
df_out[target] = df_target[target]  # adding the target column here to check correlations
df_out.dtypes
# Distributions of all the features
m, n = 6, 3
fig, ax = plt.subplots(m, n, figsize=[12, 15])
fig.tight_layout()
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_out.columns):
break
col = df_out.columns[col]
AX = ax[i][j]
sns.histplot(df_out[col], ax=AX)
AX.set(yticks=[], ylabel="")
plt.show()
# Even though all features have dtype float64, all but fruitset, fruitmass, seeds, and yield have the flavor of categorical features. I am guessing that because of this tree based regression will work better than linear regression.
# Observing the distributions of yield by all the predictors.
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_out.columns):
break
col = df_out.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_out.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_out.loc[idx_train][col].min()
xmax = df_out.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
        # also in retrospect, putting yticks and ylabels to help with outlier detection
Ax.set(ylabel="")
if i < m - 1 or j == 0:
Ax.set(yticks=[])
plt.show()
# There appears to be positive correlation between `yield` and the last three features. However there are some samples falling onto a horizontal floor there, which seems anomalous. In the other pictures most samples are neatly on vertical lines, some are scattered. Since all but the last three features seem rather categorical in nature we shall project some of the samples onto the nearest vertical lines. We shall identify the remaining outliers and see how many there are.
df_o2 = df_out.copy()
df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x > 12 else 12.5)
df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x < 40 else 37.5)
df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x > 0.1 else 0.117)
df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x < 0.255 or x > 0.35 else 0.25)
df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x > 0.2 else 0.058)
df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x < 0.53 or x > 0.63 else 0.63)
df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map(
lambda x: x if x < 77.5 or x > 80 else 77.4
)
df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map(
lambda x: x if x < 87 or x > 90 else 86
)
df_o2["MinOfUpperTRange"] = df_o2["MinOfUpperTRange"].map(
lambda x: x if x > 40 else 42.1
)
df_o2["AverageOfUpperTRange"] = df_o2["AverageOfUpperTRange"].map(
lambda x: x if x < 65 or x > 70 else 64.7
)
df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map(
lambda x: x if x < 51 or x > 53 else 50.2
)
df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map(
lambda x: x if x < 65 or x > 67 else 68.2
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 24.5 or x > 26 else 24.3
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 27.5 or x > 29 else 27
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 30.5 or x > 32 else 30
)
df_o2["AverageOfLowerTRange"] = df_o2["AverageOfLowerTRange"].map(
lambda x: x if x < 44 or x > 45.7 else 45.8
)
df_o2["RainingDays"] = df_o2["RainingDays"].map(lambda x: x if x < 25 or x > 30 else 24)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.065 or x > 0.075 else 0.06
)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.12 or x > 0.15 else 0.1
)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.24 or x > 0.255 else 0.26
)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.2 or x > 0.245 else 0.25)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.48 or x > 0.56 else 0.5)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.7 or x > 0.71 else 0.75)
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_o2.columns):
break
col = df_o2.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_o2.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_o2.loc[idx_train][col].min()
xmax = df_o2.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
        # also in retrospect, putting yticks and ylabels to help with outlier detection
Ax.set(ylabel="")
if i < m - 1 or j == 0:
Ax.set(yticks=[])
plt.show()
conds = []
conds.append(df_o2.loc[idx_train][target] < 2550)
conds.append(df_o2.loc[idx_train]["seeds"] < 24)
conds.append(df_o2.loc[idx_train]["honeybee"] > 5)
conds.append(df_o2.loc[idx_train]["fruitmass"] < 0.34)
conds.append(
df_o2.loc[idx_train]["bumbles"].apply(lambda i: True if i > 0.4 else False).values
)
conds.append(
df_o2.loc[idx_train]["andrena"].apply(lambda i: True if i < 0.2 else False).values
)
conds.append(
df_o2.apply(lambda r: r["seeds"] > 37 and r["yield"] < 3000, axis=1).values
)
idx_custom = [any(X) for X in zip(*conds)]
idx_all_in = [not x for x in idx_custom] # boolean flag for inlier indices in idx_train
idx_all_in = df_o2.loc[idx_train].loc[idx_all_in].index # inlier indices in idx_train.
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_o2.columns):
break
col = df_o2.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_o2.loc[idx_all_in][col], y=df_target.loc[idx_all_in][target], ax=Ax
)
sns.scatterplot(
x=df_o2.loc[idx_train].loc[idx_custom][col],
y=df_target.loc[idx_custom][target],
ax=Ax,
s=20,
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_o2.loc[idx_train][col].min()
xmax = df_o2.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
        # also in retrospect, putting yticks and ylabels to help with outlier detection
if i < m - 1 or j == 0:
Ax.set(yticks=[])
Ax.set(ylabel="")
plt.show()
print("% of outliers in training data:", 100 * (1 - len(idx_all_in) / len(idx_train)))
# The outliers have been identified with reasonable accuracy and only about 1% of the total training data are outliers, so we are going to drop them from training. All inlier indices are in idx_all_in
# Some of the samples I have identified as outliers fall squarely on a potential linear regression line in the plots against the highly correlated predictors fruitset, fruitmass, and seeds. These are samples that fell out of the vertical lines in the other plots. Whether it would be better to project these onto the vertical lines as well or not is unclear since they are a bit too scattered.
# # Preprocessing and Feature Engineering
# ## Dealing with Skewness
# Checking for skewness
df_full = df_o2.copy()
def zero(s):
if 0 in s.index:
return s.loc[0]
else:
return 0
num_columns = [c for c in df_full.columns if df_full[c].dtype in ["int64", "float64"]]
zero_percents = [
100 * zero(df_full[c].value_counts()) / len(df_full) for c in num_columns
]
skewness = df_full.skew(numeric_only=True)
skew_zero = pd.DataFrame(skewness, columns=["skewness"])
skew_zero["% zero"] = zero_percents
skew_zero.sort_values(by="skewness", ascending=False)
# Most features are not very skewed. `honeybee` is the only odd one.
# A quick look at the honeybee distribution
df_full["honeybee"].describe()
# Which samples have the high values in honeybee?
df_full.loc[df_full["honeybee"] > 1]
# Not sure why these values are so much higher than the rest. A small, roughly equal number of such anomalous values appears in both the training and testing data (as evidenced by valid and NaN values in the `yield` column).
# Log transforming the highly skewed columns
df_unskewed = df_full.copy().drop(
columns=[target]
)  # the target column will not be transformed; it has low skewness.
for c in df_full.columns:
if df_full[c].dtype == "object":
continue
if df_full[c].skew() < 0.5:
        continue  # threshold for being considered highly skewed.
shift = -df_full[c].min() + ONE
df_unskewed[c] = df_unskewed[c].apply(lambda x: math.log(shift + x))
df_unskewed.describe()
# ## Engineering Features
# How are all the features correlated?
# .corr() will get correlations among all features, there is no object type feature.
df_aug = df_unskewed.copy()
df_aug[target] = df_target[target]  # adding the target column here to check correlations
sns.heatmap(df_aug.corr())
df_aug.drop(columns=[target], inplace=True) # getting rid of the target column
# The temperature features are perfectly correlated, as are the two rain related features. No need to keep all of them. Instead, we can create some interaction features.
df_aug = df_unskewed.copy()
df_aug["TRange"] = df_aug["MaxOfUpperTRange"] - df_aug["MinOfLowerTRange"]
# df_aug['AverageT'] = (df_aug['AverageOfUpperTRange'] + df_aug['AverageOfLowerTRange'])/2
df_aug["T_bee_int"] = (
df_aug["AverageOfUpperTRange"] + df_aug["AverageOfLowerTRange"]
) * (df_aug["honeybee"] + df_aug["bumbles"] + df_aug["andrena"] + df_aug["osmia"])
df_aug["honeybee_clonesize_int"] = df_aug["honeybee"] * df_aug["clonesize"]
df_aug["fruitset_seeds_int"] = df_aug["fruitset"] * df_aug["seeds"]
df_aug["fruitmass_seeds_int"] = df_aug["fruitmass"] * df_aug["seeds"]
df_aug["mass_set_int"] = df_aug["fruitmass"] * df_aug["fruitset"]
df_aug["andrena_osmia_int"] = df_aug["andrena"] * df_aug["osmia"]
df_aug["set_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["fruitset"]
df_aug["mass_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["fruitmass"]
df_aug["seed_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["seeds"]
df_aug.drop(
columns=[
"MaxOfUpperTRange",
"MinOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfUpperTRange",
"AverageOfLowerTRange",
"AverageRainingDays",
],
inplace=True,
)
sns.heatmap(df_aug.corr())
plt.show()
# There is no concrete benchmark showing the efficacy of these feature engineerings.
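# A quick way to check whether the engineered interactions carry signal (a sketch, not part of
# the original pipeline): fit a throwaway model and inspect permutation importance on a held-out
# split. Exact scores will vary from run to run.
from sklearn.inspection import permutation_importance

probe_xtr, probe_xte, probe_ytr, probe_yte = train_test_split(
    df_aug.loc[idx_train], df_target.loc[idx_train][target], test_size=0.3, random_state=0
)
probe = HistGradientBoostingRegressor(loss="absolute_error").fit(probe_xtr, probe_ytr)
probe_imp = permutation_importance(
    probe, probe_xte, probe_yte, n_repeats=5, random_state=0, scoring="neg_mean_absolute_error"
)
print(
    pd.Series(probe_imp.importances_mean, index=df_aug.columns)
    .sort_values(ascending=False)
    .head(10)
)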
# ## Standardization
# ### Input Data
scaler = StandardScaler()
# the scaler is fitted only to the training data, and even then only to the inliers
scaler.fit(df_aug.loc[idx_all_in])
df_scaled = df_aug.copy()
# we transform both training and testing data using the same scaler
df_scaled = pd.DataFrame(
scaler.transform(df_scaled), index=df_scaled.index, columns=df_scaled.columns
)
df_scaled
# ### Target Data
mod_target = df_target.copy().loc[idx_all_in]
scaler_target = StandardScaler()
mod_target = scaler_target.fit_transform(mod_target)
# # Modeling
# train test split
xtrain, xval, ytrain, yval = train_test_split(
df_scaled.loc[idx_all_in], mod_target, test_size=0.3
)
yval = scaler_target.inverse_transform(
yval
)  # yval is now back on the original scale
print("Shape of training data:", xtrain.shape, ytrain.shape)
print("Shape of validation data:", xval.shape, yval.shape)
unique_targets = sorted(df_target[target].unique())
print(
"% of unique values in the target column:",
100 * len(unique_targets) / len(idx_train),
)
# Trick observation. Following Post-processing trick to improve CV & LB score (credited to siukeitin), we observe that the unique values in the target column amount to only about 5% of the training samples. Because of this, even though the target column is of dtype float64, we shall round our predictions to the closest value that appears in the set of unique values.
# some helper functions
# projects the number n to its nearest value in the unique_target list.
def force(n):
loc = bisect.bisect_left(unique_targets, n)
if loc == 0:
return unique_targets[0]
if loc == len(unique_targets):
return unique_targets[-1]
return (
unique_targets[loc]
if abs(unique_targets[loc] - n) < abs(unique_targets[loc - 1] - n)
else unique_targets[loc - 1]
)
force_np = np.vectorize(force)
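# For example, with a hypothetical unique_targets of [4000.0, 4100.0, 4200.0], force(4130.5)
# returns 4100.0 (the nearest observed value) and force(5000.0) is clamped to 4200.0.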
# computes the mean absolute error in y_pred relative to y_true
def mae(y_true, y_pred, **kwargs):
n = len(y_true)
y_true = scaler_target.inverse_transform(y_true.reshape(n, 1))
y_pred = force_np(scaler_target.inverse_transform(y_pred.reshape(n, 1)))
return mean_absolute_error(y_true, y_pred)
mae_scorer = make_scorer(mae, greater_is_better=False) # my mean absolute error scorer
# Creates a sorted list of n equally distanced numbers starting from a and ending near b.
# If the integer flag is set to True then it rounds the entries of the output list to their nearest integers.
def interval(a, b, n, integer=False):
d = (b - a) / n
ans = (
set()
) # set only because of the possibility that if rounding to integers occur then some entries may coincide
i = a
while i < b:
ans.add(round(i) if integer else i)
i += d
return sorted(list(ans))
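# For example, interval(1, 3, 4) -> [1, 1.5, 2.0, 2.5]; with integer=True the rounded values are
# deduplicated by the set, so the returned list can be shorter than n.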
# I shall use `CatBoostRegressor`, `XGBRegressor` and `HistGradientBoostingRegressor`, with `RandomizedSearchCV` for hyperparameter tuning in each case. The contest is scored at Kaggle using mean absolute error, so this is the metric that will be used for training.
# I don't know how to determine the best hyperparameters to tune or the best range in which to search for. The parameters and the ranges below are mostly guesses.
# # CatBoost
cbr = CatBoostRegressor(
loss_function="MAE",
verbose=False,
subsample=0.818,
learning_rate=0.023,
l2_leaf_reg=3.664,
iterations=1280,
depth=5,
colsample_bylevel=0.426,
border_count=300,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {'learning_rate': interval(.001, .1, 50),
'depth': range(2,10),
'l2_leaf_reg': interval(.1, 10, 50),
'iterations': interval(500, 1500, 50, integer=True),
'border_count': interval(100, 500, 50, integer=True),
'subsample': interval(.3, 1, 50),
'colsample_bylevel': interval(.3, 1, 50)
}
cbr_random = RandomizedSearchCV(scoring=mae_scorer,
estimator = cbr,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
#random_state = 31,
verbose = 1,
n_jobs = -1,
refit=True
)
cbr_random.fit(xtrain, ytrain.ravel())
cbr = cbr_random.best_estimator_
print('Best parameters from search:', cbr_random.best_params_)
print('Min error from search:', abs(cbr_random.best_score_))
"""
cbr.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(cbr.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(cbr.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_cbr.csv", index=False)
# Ideally the residuals should be normally distributed around the red line. I am consistently underestimating the high yields. There should be some way to correct this. This problem persists through all the models.
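# One simple post-hoc idea for the systematic under-prediction of high yields (a sketch, not part
# of the original pipeline): fit a monotone correction that maps predictions to observed values.
# Fitting and scoring it on the same validation split, as done here for brevity, is optimistic;
# an honest estimate would need a separate fold.
from sklearn.isotonic import IsotonicRegression

iso = IsotonicRegression(out_of_bounds="clip")
iso.fit(y_pred_val.ravel(), yval.ravel())
y_pred_cal = force_np(iso.predict(y_pred_val.ravel()))
print(
    "Mean absolute error after isotonic correction (validation):",
    mean_absolute_error(yval.ravel(), y_pred_cal),
)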
# ## XGBoost
xgb = XGBRegressor(
objective="reg:absoluteerror",
n_estimators=228,
max_depth=4,
min_child_weight=2,
max_delta_step=0.4,
grow_policy="lossguide",
subsample=0.96,
learning_rate=0.0316,
reg_lambda=1.72,
gamma=0.68,
alpha=0.88,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {#'max_depth': range(1,10),
'lambda': interval(0, 2, 50),
'gamma': interval(0, 2, 50),
'alpha': interval(0, 2, 50),
'learning_rate': interval(.01, .1, 25),
'subsample': interval(.5, 1, 50)
#'n_estimators': interval(10, 1000, 100, integer=True),
#'grow_policy': ['depthwise', 'lossguide'],
#'min_child_weight': interval(.0001, 5, 50),
#'max_delta_step': interval(0, 2, 50)
}
xgb_random = RandomizedSearchCV(scoring=mae_scorer,
estimator = xgb,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
#random_state = 31,
verbose = 1,
n_jobs = -1,
refit=True
)
xgb_random.fit(xtrain, ytrain.ravel())
xgb = xgb_random.best_estimator_
print('Best parameters from search:', xgb_random.best_params_)
print('Min error from search:', abs(xgb_random.best_score_))
"""
xgb.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(xgb.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(xgb.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_xgb.csv", index=False)
# ## HistGradientBoost
hgb = HistGradientBoostingRegressor(
loss="absolute_error",
early_stopping=False,
min_samples_leaf=19,
max_leaf_nodes=10,
max_iter=1815,
max_depth=8,
max_bins=239,
learning_rate=0.013,
l2_regularization=0.96,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {'l2_regularization': interval(0, 1, 50),
#'early_stopping': [True, False],
'learning_rate': interval(.001, .1, 50),
'max_iter': interval(100, 5000, 100, integer=True),
'max_depth': interval(1, 50, 20, integer=True),
'max_bins': interval(50, 255, 50, integer=True),
'min_samples_leaf': interval(10, 100, 20, integer=True),
'max_leaf_nodes': interval(10, 100, 20, integer=True)
}
hgb_random = RandomizedSearchCV(scoring = mae_scorer,
estimator = hgb,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
verbose = 1,
#random_state = 19,
n_jobs = -1,
refit=True
)
hgb_random.fit(xtrain, ytrain.ravel())
hgb = hgb_random.best_estimator_
print('Best parameters from search:', hgb_random.best_params_)
print('Min error from search:', abs(hgb_random.best_score_))
"""
hgb.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(hgb.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(hgb.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_hgb.csv", index=False)
# ## Averaging over Models
# The three models perform quite similarly on the validation split, so I am going to take the average of their predictions.
target_pred = force_np(
scaler_target.inverse_transform(
(
cbr.predict(df_pred).reshape(len(idx_pred), 1)
+ xgb.predict(df_pred).reshape(len(idx_pred), 1)
+ hgb.predict(df_pred).reshape(len(idx_pred), 1)
)
/ 3
)
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_hgb+xgb+cbr.csv", index=False)
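# A quick sanity check of the blend on the validation split created earlier (the same split was
# used to eyeball each model above, so treat the number as indicative rather than unbiased).
blend_val = force_np(
    scaler_target.inverse_transform(
        (
            cbr.predict(xval).reshape(len(xval), 1)
            + xgb.predict(xval).reshape(len(xval), 1)
            + hgb.predict(xval).reshape(len(xval), 1)
        )
        / 3
    )
)
print(
    "Mean absolute error of the blend (validation):",
    mean_absolute_error(yval.reshape(len(xval), 1), blend_val),
)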
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/429/129429904.ipynb
| null | null |
[{"Id": 129429904, "ScriptId": 38441078, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 962756, "CreationDate": "05/13/2023 18:11:28", "VersionNumber": 5.0, "Title": "Blueberry yield (playground series 3.14)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 565.0, "LinesInsertedFromPrevious": 224.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 341.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import math
import bisect
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skew, kurtosis
# Regressors and classifiers
from xgboost import XGBRegressor
from sklearn.ensemble import (
RandomForestClassifier,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.linear_model import Ridge
from catboost import CatBoostRegressor
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
accuracy_score,
make_scorer,
)
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import StandardScaler
from sklearn.impute import KNNImputer
import category_encoders as ce
ONE = 1.123456789
# # Loading Dataframes
# Dataset taken from Kaggle competition: Prediction of Wild Blueberry Yield.
dir_input = "../input/playground-series-s3e14"
dir_output = "../working"
out_prefix = dir_output + "/playground-series-s3e14"
dir_train_csv = dir_input + "/train.csv"
dir_test_csv = dir_input + "/test.csv"
pd.read_csv(dir_train_csv)
df_train = pd.read_csv(dir_train_csv)
target = "yield"
df_target = df_train[[target]]
df_train = df_train.drop(columns=[target])
df_test = pd.read_csv(dir_test_csv)
df = pd.concat([df_train, df_test])
df.index = df["id"].values
df = df.drop(columns=["id"])
idx_train = df.index[: len(df_train)]
idx_pred = df.index[len(df_train) :]
df_target.index = idx_train
# df : contains both training and testing data without the target column.
# to be used for applying the same preprocessing to both training and testing data.
# df_target : target column of the training data.
# idx_train : indices of all the training samples.
# idx_pred : indices of the samples from the testing data.
print("No. of training samples:", len(idx_train))
print("No. of testing samples:", len(idx_pred))
df
# # Imputing Missing Values
df.isnull().sum()
# Nothing to do here.
# # Detecting Outliers
df_out = df.copy()
df_out[target] = df_target[target] # adding the target column here to check correations
df_out.dtypes
# Distributions of all the features
m, n = 6, 3
fig, ax = plt.subplots(m, n, figsize=[12, 15])
fig.tight_layout()
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_out.columns):
break
col = df_out.columns[col]
AX = ax[i][j]
sns.histplot(df_out[col], ax=AX)
AX.set(yticks=[], ylabel="")
plt.show()
# Even though all features have dtype float64, all but fruitset, fruitmass, seeds, and yield have the flavor of categorical features. I am guessing that because of this tree based regression will work better than linear regression.
# Observing the distributions of yield by all the predictors.
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_out.columns):
break
col = df_out.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_out.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_out.loc[idx_train][col].min()
xmax = df_out.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
# also in retrospect, putting yticks and ylables to help out outlier detection
Ax.set(ylabel="")
if i < m - 1 or j == 0:
Ax.set(yticks=[])
plt.show()
# There appears to be positive correlation between `yield` and the last three features. However there are some samples falling onto a horizontal floor there, which seems anomalous. In the other pictures most samples are neatly on vertical lines, some are scattered. Since all but the last three features seem rather categorical in nature we shall project some of the samples onto the nearest vertical lines. We shall identify the remaining outliers and see how many there are.
df_o2 = df_out.copy()
df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x > 12 else 12.5)
df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x < 40 else 37.5)
df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x > 0.1 else 0.117)
df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x < 0.255 or x > 0.35 else 0.25)
df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x > 0.2 else 0.058)
df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x < 0.53 or x > 0.63 else 0.63)
df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map(
lambda x: x if x < 77.5 or x > 80 else 77.4
)
df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map(
lambda x: x if x < 87 or x > 90 else 86
)
df_o2["MinOfUpperTRange"] = df_o2["MinOfUpperTRange"].map(
lambda x: x if x > 40 else 42.1
)
df_o2["AverageOfUpperTRange"] = df_o2["AverageOfUpperTRange"].map(
lambda x: x if x < 65 or x > 70 else 64.7
)
df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map(
lambda x: x if x < 51 or x > 53 else 50.2
)
df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map(
lambda x: x if x < 65 or x > 67 else 68.2
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 24.5 or x > 26 else 24.3
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 27.5 or x > 29 else 27
)
df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map(
lambda x: x if x < 30.5 or x > 32 else 30
)
df_o2["AverageOfLowerTRange"] = df_o2["AverageOfLowerTRange"].map(
lambda x: x if x < 44 or x > 45.7 else 45.8
)
df_o2["RainingDays"] = df_o2["RainingDays"].map(lambda x: x if x < 25 or x > 30 else 24)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.065 or x > 0.075 else 0.06
)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.12 or x > 0.15 else 0.1
)
df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map(
lambda x: x if x < 0.24 or x > 0.255 else 0.26
)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.2 or x > 0.245 else 0.25)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.48 or x > 0.56 else 0.5)
df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.7 or x > 0.71 else 0.75)
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_o2.columns):
break
col = df_o2.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_o2.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_o2.loc[idx_train][col].min()
xmax = df_o2.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
# also in retrospect, putting yticks and ylables to help out outlier detection
Ax.set(ylabel="")
if i < m - 1 or j == 0:
Ax.set(yticks=[])
plt.show()
conds = []
conds.append(df_o2.loc[idx_train][target] < 2550)
conds.append(df_o2.loc[idx_train]["seeds"] < 24)
conds.append(df_o2.loc[idx_train]["honeybee"] > 5)
conds.append(df_o2.loc[idx_train]["fruitmass"] < 0.34)
conds.append(
df_o2.loc[idx_train]["bumbles"].apply(lambda i: True if i > 0.4 else False).values
)
conds.append(
df_o2.loc[idx_train]["andrena"].apply(lambda i: True if i < 0.2 else False).values
)
conds.append(
df_o2.apply(lambda r: r["seeds"] > 37 and r["yield"] < 3000, axis=1).values
)
idx_custom = [any(X) for X in zip(*conds)]
idx_all_in = [not x for x in idx_custom] # boolean flag for inlier indices in idx_train
idx_all_in = df_o2.loc[idx_train].loc[idx_all_in].index # inlier indices in idx_train.
m, n = 4, 4
fig, ax = plt.subplots(m, n, figsize=[12, 10])
fig.tight_layout()
fig.subplots_adjust(hspace=0.5)
for i in range(m):
for j in range(n):
col = i * n + j
if col >= len(df_o2.columns):
break
col = df_o2.columns[col]
if col == target:
continue
Ax = ax[i][j]
sns.scatterplot(
x=df_o2.loc[idx_all_in][col], y=df_target.loc[idx_all_in][target], ax=Ax
)
sns.scatterplot(
x=df_o2.loc[idx_train].loc[idx_custom][col],
y=df_target.loc[idx_custom][target],
ax=Ax,
s=20,
)
# putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in
# identifying outlier values
xmin = df_o2.loc[idx_train][col].min()
xmax = df_o2.loc[idx_train][col].max()
n_xticks = 10
d = (xmax - xmin) / n_xticks
Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)])
Ax.set_xticklabels(Ax.get_xticks(), rotation=45)
Ax.grid(True, linestyle="-.")
# also in retrospect, putting yticks and ylables to help out outlier detection
if i < m - 1 or j == 0:
Ax.set(yticks=[])
Ax.set(ylabel="")
plt.show()
print("% of outliers in training data:", 100 * (1 - len(idx_all_in) / len(idx_train)))
# The outliers have been identified with reasonable accuracy and only about 1% of the total training data are outliers, so we are going to drop them from training. All inlier indices are in idx_all_in
# Some of the samples I have identified as outliers fall squarely on a potential linear regression line in the plots against the highly correlated predictors fruitset, fruitmass, and seeds. These are samples that fell out of the vertical lines in the other plots. Whether it would be better to project these onto the vertical lines as well or not is unclear since they are a bit too scattered.
# # Preprocessing and Feature Engineering
# ## Dealing with Skewness
# Checking for skewness
df_full = df_o2.copy()
def zero(s):
if 0 in s.index:
return s.loc[0]
else:
return 0
num_columns = [c for c in df_full.columns if df_full[c].dtype in ["int64", "float64"]]
zero_percents = [
100 * zero(df_full[c].value_counts()) / len(df_full) for c in num_columns
]
skewness = df_full.skew(numeric_only=True)
skew_zero = pd.DataFrame(skewness, columns=["skewness"])
skew_zero["% zero"] = zero_percents
skew_zero.sort_values(by="skewness", ascending=False)
# Most features are not very skewed. `honeybee` is the only odd one.
# A quick look at the honeybee distribution
df_full["honeybee"].describe()
# Which samples have the high values in honeybee?
df_full.loc[df_full["honeybee"] > 1]
# Not sure why these values are so much higher than the rest. Almost equal but small number of such anomalous values appear both in training and testing data (as evidenced by valid and NaN values in the `yield` column).
# Log transforming the highly skewed columns
df_unskewed = df_full.copy().drop(
columns=[target]
) # the target columns will not be transformed. It has low skewness.
for c in df_full.columns:
if df_full[c].dtype == "object":
continue
if df_full[c].skew() < 0.5:
continue # threshhold for being considered to be highly skewed.
shift = -df_full[c].min() + ONE
df_unskewed[c] = df_unskewed[c].apply(lambda x: math.log(shift + x))
df_unskewed.describe()
# ## Engigeering Features
# How are all the features correlated?
# .corr() will get correlations among all features, there is no object type feature.
df_aug = df_unskewed.copy()
df_aug[target] = df_target[target] # adding the target column here to check correations
sns.heatmap(df_aug.corr())
df_aug.drop(columns=[target], inplace=True) # getting rid of the target column
# The temperature features are perfectly correlated, as are the two rain related features. No need to keep all of them. Instead, we can create some interaction features.
df_aug = df_unskewed.copy()
df_aug["TRange"] = df_aug["MaxOfUpperTRange"] - df_aug["MinOfLowerTRange"]
# df_aug['AverageT'] = (df_aug['AverageOfUpperTRange'] + df_aug['AverageOfLowerTRange'])/2
df_aug["T_bee_int"] = (
df_aug["AverageOfUpperTRange"] + df_aug["AverageOfLowerTRange"]
) * (df_aug["honeybee"] + df_aug["bumbles"] + df_aug["andrena"] + df_aug["osmia"])
df_aug["honeybee_clonesize_int"] = df_aug["honeybee"] * df_aug["clonesize"]
df_aug["fruitset_seeds_int"] = df_aug["fruitset"] * df_aug["seeds"]
df_aug["fruitmass_seeds_int"] = df_aug["fruitmass"] * df_aug["seeds"]
df_aug["mass_set_int"] = df_aug["fruitmass"] * df_aug["fruitset"]
df_aug["andrena_osmia_int"] = df_aug["andrena"] * df_aug["osmia"]
df_aug["set_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["fruitset"]
df_aug["mass_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["fruitmass"]
df_aug["seed_bee_int"] = (
df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"]
) * df_aug["seeds"]
df_aug.drop(
columns=[
"MaxOfUpperTRange",
"MinOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfUpperTRange",
"AverageOfLowerTRange",
"AverageRainingDays",
],
inplace=True,
)
sns.heatmap(df_aug.corr())
plt.show()
# There is no concrete benchmark showing the efficacy of these feature engineerings.
# ## Standardization
# ### Input Data
scaler = StandardScaler()
# the scaler is fitted only to the training data, and even then only to the inliers
scaler.fit(df_aug.loc[idx_all_in])
df_scaled = df_aug.copy()
# we transform both training and testing data using the same scaler
df_scaled = pd.DataFrame(
scaler.transform(df_scaled), index=df_scaled.index, columns=df_scaled.columns
)
df_scaled
# ### Target Data
mod_target = df_target.copy().loc[idx_all_in]
scaler_target = StandardScaler()
mod_target = scaler_target.fit_transform(mod_target)
# # Modeling
# train test split
xtrain, xval, ytrain, yval = train_test_split(
df_scaled.loc[idx_all_in], mod_target, test_size=0.3
)
yval = scaler_target.inverse_transform(
yval
) # yval is already been transformed into the original form
print("Shape of training data:", xtrain.shape, ytrain.shape)
print("Shape of validation data:", xval.shape, yval.shape)
unique_targets = sorted(df_target[target].unique())
print(
"% of unique values in the target column:",
100 * len(unique_targets) / len(idx_train),
)
# Trick observation. Following Post-processing trick to improve CV & LB score (credited to siukeitin), we observe that there are only about 5% unique values in the target columns of the training samples. Because of this, even though the target column is of dtype float64, we shall round our predictions to the closest value that appears in the set of unique values.
# some helper functions
# projects the number n to its nearest value in the unique_target list.
def force(n):
loc = bisect.bisect_left(unique_targets, n)
if loc == 0:
return unique_targets[0]
if loc == len(unique_targets):
return unique_targets[-1]
return (
unique_targets[loc]
if abs(unique_targets[loc] - n) < abs(unique_targets[loc - 1] - n)
else unique_targets[loc - 1]
)
force_np = np.vectorize(force)
# computes the mean absolute error in y_pred relative to y_true
def mae(y_true, y_pred, **kwargs):
n = len(y_true)
y_true = scaler_target.inverse_transform(y_true.reshape(n, 1))
y_pred = force_np(scaler_target.inverse_transform(y_pred.reshape(n, 1)))
return mean_absolute_error(y_true, y_pred)
mae_scorer = make_scorer(mae, greater_is_better=False) # my mean absolute error scorer
# Creates a sorted list of n equally distanced numbers starting from a and ending near b.
# If the integer flag is set to True then it rounds the entries of the output list to their nearest integers.
def interval(a, b, n, integer=False):
d = (b - a) / n
ans = (
set()
) # set only because of the possibility that if rounding to integers occur then some entries may coincide
i = a
while i < b:
ans.add(round(i) if integer else i)
i += d
return sorted(list(ans))
# I shall use `CatBoostRegressor`, `XGBRegressor` and `HistGradientBoost` and use `RandomizedSearchCV` for hyperparameter tuning in both cases. The contest is scored at Kaggle using mean absolute error, so this is the metric that will be used for training.
# I don't know how to determine the best hyperparameters to tune or the best range in which to search for. The parameters and the ranges below are mostly guesses.
# # CatBoost
cbr = CatBoostRegressor(
loss_function="MAE",
verbose=False,
subsample=0.818,
learning_rate=0.023,
l2_leaf_reg=3.664,
iterations=1280,
depth=5,
colsample_bylevel=0.426,
border_count=300,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {'learning_rate': interval(.001, .1, 50),
'depth': range(2,10),
'l2_leaf_reg': interval(.1, 10, 50),
'iterations': interval(500, 1500, 50, integer=True),
'border_count': interval(100, 500, 50, integer=True),
'subsample': interval(.3, 1, 50),
'colsample_bylevel': interval(.3, 1, 50)
}
cbr_random = RandomizedSearchCV(scoring=mae_scorer,
estimator = cbr,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
#random_state = 31,
verbose = 1,
n_jobs = -1,
refit=True
)
cbr_random.fit(xtrain, ytrain.ravel())
cbr = cbr_random.best_estimator_
print('Best parameters from search:', cbr_random.best_params_)
print('Min error from search:', abs(cbr_random.best_score_))
"""
cbr.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(cbr.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(cbr.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_cbr.csv", index=False)
# Ideally the residuals should be distributed evenly around the red zero line. Instead, I am consistently underestimating the high yields, and this problem persists across all the models below. There should be some way to correct this; one possible correction is sketched next.
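# One possible correction for the systematic underestimation of high yields (a sketch
# only, not tried here and not part of the submitted solution): learn a monotone
# calibration map from the raw validation predictions to the observed validation
# targets with sklearn's IsotonicRegression, then snap to observed target values.
# This assumes the bias is monotone in the predicted value; the names iso and
# calibrated_val are introduced here purely for illustration, and in practice the
# calibration should be fit on a separate fold to avoid leakage. Uncomment to experiment.
"""
from sklearn.isotonic import IsotonicRegression
iso = IsotonicRegression(out_of_bounds="clip")
iso.fit(y_pred_val.ravel(), yval.ravel())
calibrated_val = force_np(iso.predict(y_pred_val.ravel()).reshape(-1, 1))
print("Calibrated MAE (validation):",
      mean_absolute_error(yval.reshape(-1, 1), calibrated_val))
"""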
# ## XGBoost
xgb = XGBRegressor(
objective="reg:absoluteerror",
n_estimators=228,
max_depth=4,
min_child_weight=2,
max_delta_step=0.4,
grow_policy="lossguide",
subsample=0.96,
learning_rate=0.0316,
reg_lambda=1.72,
gamma=0.68,
alpha=0.88,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {#'max_depth': range(1,10),
'lambda': interval(0, 2, 50),
'gamma': interval(0, 2, 50),
'alpha': interval(0, 2, 50),
'learning_rate': interval(.01, .1, 25),
'subsample': interval(.5, 1, 50)
#'n_estimators': interval(10, 1000, 100, integer=True),
#'grow_policy': ['depthwise', 'lossguide'],
#'min_child_weight': interval(.0001, 5, 50),
#'max_delta_step': interval(0, 2, 50)
}
xgb_random = RandomizedSearchCV(scoring=mae_scorer,
estimator = xgb,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
#random_state = 31,
verbose = 1,
n_jobs = -1,
refit=True
)
xgb_random.fit(xtrain, ytrain.ravel())
xgb = xgb_random.best_estimator_
print('Best parameters from search:', xgb_random.best_params_)
print('Min error from search:', abs(xgb_random.best_score_))
"""
xgb.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(xgb.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(xgb.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_xgb.csv", index=False)
# ## HistGradientBoost
hgb = HistGradientBoostingRegressor(
loss="absolute_error",
early_stopping=False,
min_samples_leaf=19,
max_leaf_nodes=10,
max_iter=1815,
max_depth=8,
max_bins=239,
learning_rate=0.013,
l2_regularization=0.96,
)
# uncomment the following block to run randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {'l2_regularization': interval(0, 1, 50),
#'early_stopping': [True, False],
'learning_rate': interval(.001, .1, 50),
'max_iter': interval(100, 5000, 100, integer=True),
'max_depth': interval(1, 50, 20, integer=True),
'max_bins': interval(50, 255, 50, integer=True),
'min_samples_leaf': interval(10, 100, 20, integer=True),
'max_leaf_nodes': interval(10, 100, 20, integer=True)
}
hgb_random = RandomizedSearchCV(scoring = mae_scorer,
estimator = hgb,
param_distributions = random_grid,
n_iter = 150,
cv = 3,
verbose = 1,
#random_state = 19,
n_jobs = -1,
refit=True
)
hgb_random.fit(xtrain, ytrain.ravel())
hgb = hgb_random.best_estimator_
print('Best parameters from search:', hgb_random.best_params_)
print('Min error from search:', abs(hgb_random.best_score_))
"""
hgb.fit(xtrain, ytrain.ravel())
y_pred_val = force_np(
scaler_target.inverse_transform(hgb.predict(xval).reshape(len(xval), 1))
)
print(
"Mean absolute error (validation):",
mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)
res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))
df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
scaler_target.inverse_transform(hgb.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_hgb.csv", index=False)
# ## Averaging over Models
# The three models perform quite similarly on the held-out validation data, so I take the simple average of their predictions.
target_pred = force_np(
scaler_target.inverse_transform(
(
cbr.predict(df_pred).reshape(len(idx_pred), 1)
+ xgb.predict(df_pred).reshape(len(idx_pred), 1)
+ hgb.predict(df_pred).reshape(len(idx_pred), 1)
)
/ 3
)
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_hgb+xgb+cbr.csv", index=False)
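# Quick sanity check (not part of the submission pipeline): evaluate the same simple
# average on the held-out validation split to confirm that blending does not hurt
# relative to the individual models. All names below are already defined in this section.
blend_val = force_np(
    scaler_target.inverse_transform(
        (
            cbr.predict(xval).reshape(len(xval), 1)
            + xgb.predict(xval).reshape(len(xval), 1)
            + hgb.predict(xval).reshape(len(xval), 1)
        )
        / 3
    )
)
print(
    "Mean absolute error of the blend (validation):",
    mean_absolute_error(yval.reshape(len(xval), 1), blend_val),
)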
| false | 0 | 8,354 | 0 | 8,354 | 8,354 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.