| column | dtype | length / value range |
| --- | --- | --- |
| file_id | string | length 5–9 |
| content | string | length 100–5.25M |
| local_path | string | length 66–70 |
| kaggle_dataset_name | string | length 3–50 |
| kaggle_dataset_owner | string | length 3–20 |
| kversion | string | length 497–763 |
| kversion_datasetsources | string | length 71–5.46k |
| dataset_versions | string | length 338–235k |
| datasets | string | length 334–371 |
| users | string | length 111–264 |
| script | string | length 100–5.25M |
| df_info | string | length 0–4.87M |
| has_data_info | bool | 2 classes |
| nb_filenames | int64 | 0–370 |
| retreived_data_description | string | length 0–4.44M |
| script_nb_tokens | int64 | 25–663k |
| upvotes | int64 | 0–1.65k |
| tokens_description | int64 | 25–663k |
| tokens_script | int64 | 25–663k |
129561985
<jupyter_start><jupyter_text>Salary Dataset - Simple linear regression ## Dataset Description Salary Dataset in CSV for Simple linear regression. It has also been used in Machine Learning A to Z course of my series. ## Columns - # - YearsExperience - Salary Kaggle dataset identifier: salary-dataset-simple-linear-regression <jupyter_code>import pandas as pd df = pd.read_csv('salary-dataset-simple-linear-regression/Salary_dataset.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 30 entries, 0 to 29 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 30 non-null int64 1 YearsExperience 30 non-null float64 2 Salary 30 non-null float64 dtypes: float64(2), int64(1) memory usage: 848.0 bytes <jupyter_text>Examples: { "Unnamed: 0": 0.0, "YearsExperience": 1.2, "Salary": 39344.0 } { "Unnamed: 0": 1.0, "YearsExperience": 1.4, "Salary": 46206.0 } { "Unnamed: 0": 2.0, "YearsExperience": 1.6, "Salary": 37732.0 } { "Unnamed: 0": 3.0, "YearsExperience": 2.1, "Salary": 43526.0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data = pd.read_csv( "/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv" ) data print(data.head(5)) print(data.describe()) print(data.info()) # salary_val = data["Salary"].values # salary_val from matplotlib import pyplot as plt x = data.YearsExperience.values y = data.Salary.values # data['YearsExperience'] = pd.to_datetime(data['YearsExperience']) # data['Salary'] = pd.to_datetime(data['Salary']) from sklearn.model_selection import train_test_split x = x.reshape(-1, 1) x x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=100 ) x_train plt.scatter(x_train, y_train, color="red") plt.xlabel("Years_of_Experience") plt.ylabel("Salary") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) lr.score(x_test, y_test) * 100 # ### Draw at least three conclusions from your regression model x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.6, random_state=200 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("yers_of_experiance") plt.ylabel("Salary") plt.plot x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.4, random_state=300 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("yers_of_experiance") plt.ylabel("Salary") plt.plot x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.5, random_state=400 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) 
y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("Years_of_Experience") plt.ylabel("Salary") plt.plot
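The markdown cell above asks for at least three conclusions from the regression, but the notebook only prints scores. A minimal sketch, assuming the same file path and the same 80/20 split with `random_state=100` as in the first cell, of how the slope, intercept, and held-out R² could be read off the fitted model to back such conclusions (an illustration, not the author's code):

```python
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Path and columns as in the dataset card above (assumed to be available).
df = pd.read_csv(
    "/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv"
)
X = df[["YearsExperience"]].values  # 2-D feature matrix, as sklearn expects
y = df["Salary"].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=100
)
lr = LinearRegression().fit(X_train, y_train)

# Slope = salary gained per extra year of experience; intercept = predicted salary at 0 years.
print(f"slope per year of experience: {lr.coef_[0]:.1f}")
print(f"intercept (0 years): {lr.intercept_:.1f}")
print(f"held-out R^2: {lr.score(X_test, y_test):.3f}")
```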
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561985.ipynb
salary-dataset-simple-linear-regression
abhishek14398
[{"Id": 129561985, "ScriptId": 38522377, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15001180, "CreationDate": "05/14/2023 21:17:11", "VersionNumber": 1.0, "Title": "notebook345ac3d399", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 98.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185746430, "KernelVersionId": 129561985, "SourceDatasetVersionId": 4832081}]
[{"Id": 4832081, "DatasetId": 2799910, "DatasourceVersionId": 4895851, "CreatorUserId": 3259703, "LicenseName": "CC0: Public Domain", "CreationDate": "01/10/2023 03:55:40", "VersionNumber": 1.0, "Title": "Salary Dataset - Simple linear regression", "Slug": "salary-dataset-simple-linear-regression", "Subtitle": "Simple Linear Regression Dataset, used in Machine Learning A - Z", "Description": "## Dataset Description\nSalary Dataset in CSV for Simple linear regression. It has also been used in Machine Learning A to Z course of my series.\n\n## Columns\n- #\n- YearsExperience\n- Salary", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2799910, "CreatorUserId": 3259703, "OwnerUserId": 3259703.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4832081.0, "CurrentDatasourceVersionId": 4895851.0, "ForumId": 2834222, "Type": 2, "CreationDate": "01/10/2023 03:55:40", "LastActivityDate": "01/10/2023", "TotalViews": 65295, "TotalDownloads": 13051, "TotalVotes": 139, "TotalKernels": 93}]
[{"Id": 3259703, "UserName": "abhishek14398", "DisplayName": "Allena Venkata Sai Aby", "RegisterDate": "05/22/2019", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data = pd.read_csv( "/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv" ) data print(data.head(5)) print(data.describe()) print(data.info()) # salary_val = data["Salary"].values # salary_val from matplotlib import pyplot as plt x = data.YearsExperience.values y = data.Salary.values # data['YearsExperience'] = pd.to_datetime(data['YearsExperience']) # data['Salary'] = pd.to_datetime(data['Salary']) from sklearn.model_selection import train_test_split x = x.reshape(-1, 1) x x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=100 ) x_train plt.scatter(x_train, y_train, color="red") plt.xlabel("Years_of_Experience") plt.ylabel("Salary") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) lr.score(x_test, y_test) * 100 # ### Draw at least three conclusions from your regression model x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.6, random_state=200 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("yers_of_experiance") plt.ylabel("Salary") plt.plot x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.4, random_state=300 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("yers_of_experiance") plt.ylabel("Salary") plt.plot x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.5, random_state=400 ) lr.fit(x_train, y_train) print("THE SCORE IS ", lr.score(x_test, y_test) * 100) y_predict = lr.predict(x_test) plt.scatter(x_train, y_train, color="red") plt.plot(x_test, y_predict, color="green") plt.xlabel("yers_of_experiance") plt.ylabel("Salary") plt.plot
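The script above probes stability by re-splitting the 30 rows with different `train_size` and `random_state` values. A hedged alternative sketch, under the same file-path assumption, that asks the same question with shuffled k-fold cross-validation instead of manual re-splits:

```python
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

# Path and column names as in the dataset description above (assumed).
df = pd.read_csv(
    "/kaggle/input/salary-dataset-simple-linear-regression/Salary_dataset.csv"
)
X = df[["YearsExperience"]]
y = df["Salary"]

# Shuffled 5-fold CV: each fold plays the role of one of the manual train/test splits.
cv = KFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(LinearRegression(), X, y, cv=cv, scoring="r2")
print("fold R^2:", scores.round(3), "mean:", f"{scores.mean():.3f}")
```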
[{"salary-dataset-simple-linear-regression/Salary_dataset.csv": {"column_names": "[\"Unnamed: 0\", \"YearsExperience\", \"Salary\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"YearsExperience\": \"float64\", \"Salary\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 30 entries, 0 to 29\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 30 non-null int64 \n 1 YearsExperience 30 non-null float64\n 2 Salary 30 non-null float64\ndtypes: float64(2), int64(1)\nmemory usage: 848.0 bytes\n", "summary": "{\"Unnamed: 0\": {\"count\": 30.0, \"mean\": 14.5, \"std\": 8.803408430829505, \"min\": 0.0, \"25%\": 7.25, \"50%\": 14.5, \"75%\": 21.75, \"max\": 29.0}, \"YearsExperience\": {\"count\": 30.0, \"mean\": 5.413333333333332, \"std\": 2.8378881576627184, \"min\": 1.2000000000000002, \"25%\": 3.3000000000000003, \"50%\": 4.8, \"75%\": 7.8, \"max\": 10.6}, \"Salary\": {\"count\": 30.0, \"mean\": 76004.0, \"std\": 27414.4297845823, \"min\": 37732.0, \"25%\": 56721.75, \"50%\": 65238.0, \"75%\": 100545.75, \"max\": 122392.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"YearsExperience\":{\"0\":1.2,\"1\":1.4,\"2\":1.6,\"3\":2.1},\"Salary\":{\"0\":39344.0,\"1\":46206.0,\"2\":37732.0,\"3\":43526.0}}"}}]
true
1
<start_data_description><data_path>salary-dataset-simple-linear-regression/Salary_dataset.csv: <column_names> ['Unnamed: 0', 'YearsExperience', 'Salary'] <column_types> {'Unnamed: 0': 'int64', 'YearsExperience': 'float64', 'Salary': 'float64'} <dataframe_Summary> {'Unnamed: 0': {'count': 30.0, 'mean': 14.5, 'std': 8.803408430829505, 'min': 0.0, '25%': 7.25, '50%': 14.5, '75%': 21.75, 'max': 29.0}, 'YearsExperience': {'count': 30.0, 'mean': 5.413333333333332, 'std': 2.8378881576627184, 'min': 1.2000000000000002, '25%': 3.3000000000000003, '50%': 4.8, '75%': 7.8, 'max': 10.6}, 'Salary': {'count': 30.0, 'mean': 76004.0, 'std': 27414.4297845823, 'min': 37732.0, '25%': 56721.75, '50%': 65238.0, '75%': 100545.75, 'max': 122392.0}} <dataframe_info> RangeIndex: 30 entries, 0 to 29 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 30 non-null int64 1 YearsExperience 30 non-null float64 2 Salary 30 non-null float64 dtypes: float64(2), int64(1) memory usage: 848.0 bytes <some_examples> {'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'YearsExperience': {'0': 1.2, '1': 1.4, '2': 1.6, '3': 2.1}, 'Salary': {'0': 39344.0, '1': 46206.0, '2': 37732.0, '3': 43526.0}} <end_description>
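The profile above shows the saved index surviving as an `Unnamed: 0` column. A small sketch, assuming the same relative path as in the description, of reading the file so that column never enters the feature set:

```python
import pandas as pd

# "Unnamed: 0" is the written-out index; index_col=0 absorbs it on load.
df = pd.read_csv(
    "salary-dataset-simple-linear-regression/Salary_dataset.csv", index_col=0
)
print(df.columns.tolist())  # expected: ['YearsExperience', 'Salary']
```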
963
1
1,364
963
129561767
<jupyter_start><jupyter_text>Dummy Marketing and Sales Data I made this data for my students in 'Data-Driven Marketing' and 'Data Science for Business'. Data contains: - TV promotion budget (in million) - Social Media promotion budget (in million) - Radio promotion budget (in million) - Influencer: Whether the promotion collaborate with Mega, Macro, Nano, Micro influencer - Sales (in million) This data can be used for simple tasks: - Data preprocessing - Exploratory Data Analysis - Visualization - Prediction using Linear Regression and Model Evaluation Kaggle dataset identifier: dummy-advertising-and-sales-data <jupyter_code>import pandas as pd df = pd.read_csv('dummy-advertising-and-sales-data/Dummy Data HSS.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 4572 entries, 0 to 4571 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 TV 4562 non-null float64 1 Radio 4568 non-null float64 2 Social Media 4566 non-null float64 3 Influencer 4572 non-null object 4 Sales 4566 non-null float64 dtypes: float64(4), object(1) memory usage: 178.7+ KB <jupyter_text>Examples: { "TV": 16, "Radio": 6.566230788, "Social Media": 2.907982773, "Influencer": "Mega", "Sales": 54.73275715 } { "TV": 13, "Radio": 9.237764567, "Social Media": 2.409567204, "Influencer": "Mega", "Sales": 46.67789698 } { "TV": 41, "Radio": 15.88644602, "Social Media": 2.913410175, "Influencer": "Mega", "Sales": 150.1778288 } { "TV": 83, "Radio": 30.02002826, "Social Media": 6.922303959, "Influencer": "Mega", "Sales": 298.2463398 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## Student study Hours # df = pd.read_csv("/kaggle/input/student-study-hours/score.csv") df hours_val = df.Hours.values hours_val scores_val = df.Scores.values scores_val from matplotlib import pyplot as plt x = hours_val # x: independent variable y = scores_val # y : dependent variable plt.scatter(x, y, color="purple") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot x = x.reshape(-1, 1) x, len(x) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) plt.scatter(x_test, y_test, color="purple") plt.scatter(x_train, y_train, color="red") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict lr.score(x_test, y_test) * 100 plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_predict, color="green") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot # ## Dummy Marketing and Sales Data # df = pd.read_csv("/kaggle/input/dummy-advertising-and-sales-data/Dummy Data HSS.csv") df radio_val = df.Radio.values radio_val sales_val = df.Sales.values sales_val x = radio_val # x: 
independent variable y = sales_val # y : dependent variable plt.scatter(x, y, color="purple") plt.xlabel("radio") plt.ylabel("Sales") plt.plot x = x.reshape(-1, 1) x, len(x) x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) plt.scatter(x_test, y_test, color="red") plt.scatter(x_train, y_train, color="purple") plt.xlabel("Radio") plt.ylabel("Sales") plt.plot from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="mean") x_train = imputer.fit_transform(x_train) x_test = imputer.transform(x_test) y_train = imputer.fit_transform(y_train.reshape(-1, 1)).ravel() lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict y_test = imputer.transform(y_test.reshape(-1, 1)).flatten() score = lr.score(x_test, y_test) * 100 score plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_predict, color="green") plt.xlabel("radio") plt.ylabel("Sales") plt.plot # ## Fuel Consumption 2000-2022 # df = pd.read_csv("/kaggle/input/fuel-consumption/Fuel_Consumption_2000-2022.csv") df fuel_consum_val = df["FUEL CONSUMPTION"].values fuel_consum_val emissions_val = df.EMISSIONS.values emissions_val from matplotlib import pyplot as plt x = fuel_consum_val # x: independent variable y = emissions_val # y : dependent variable plt.scatter(x, y, color="red") plt.xlabel("FUEL CONSUMPTION") plt.ylabel("EMISSIONS") plt.plot x = x.reshape(-1, 1) x, len(x) x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_test, color="green") plt.xlabel("FUEL CONSUMPTION") plt.ylabel("EMISSIONS") plt.plot lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict lr.score(x_test, y_test) * 100
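Imputing the target and re-fitting the same imputer on `y` is fragile; a common alternative is to drop rows whose target is missing and keep feature imputation inside a pipeline so it is fit on the training fold only. A sketch under those assumptions (same file and columns as the dataset card above), not a drop-in replacement for the cell:

```python
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

df = pd.read_csv(
    "/kaggle/input/dummy-advertising-and-sales-data/Dummy Data HSS.csv"
)

# Drop rows with a missing target; mean-impute missing Radio values inside the pipeline.
df = df.dropna(subset=["Sales"])
X = df[["Radio"]].values
y = df["Sales"].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=600
)
model = make_pipeline(SimpleImputer(strategy="mean"), LinearRegression())
model.fit(X_train, y_train)
print(f"test R^2: {model.score(X_test, y_test):.3f}")
```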
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561767.ipynb
dummy-advertising-and-sales-data
harrimansaragih
[{"Id": 129561767, "ScriptId": 38523641, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14997001, "CreationDate": "05/14/2023 21:13:56", "VersionNumber": 1.0, "Title": "Linear Regression", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 195.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185746034, "KernelVersionId": 129561767, "SourceDatasetVersionId": 2015190}, {"Id": 185746035, "KernelVersionId": 129561767, "SourceDatasetVersionId": 3965497}, {"Id": 185746036, "KernelVersionId": 129561767, "SourceDatasetVersionId": 4906399}]
[{"Id": 2015190, "DatasetId": 1206038, "DatasourceVersionId": 2054779, "CreatorUserId": 5459862, "LicenseName": "CC0: Public Domain", "CreationDate": "03/12/2021 00:09:03", "VersionNumber": 1.0, "Title": "Dummy Marketing and Sales Data", "Slug": "dummy-advertising-and-sales-data", "Subtitle": "Data of TV, Influencer, Radio, and Social Media Ads budget to predict Sales", "Description": "I made this data for my students in 'Data-Driven Marketing' and 'Data Science for Business'. Data contains:\n- TV promotion budget (in million)\n- Social Media promotion budget (in million)\n- Radio promotion budget (in million)\n- Influencer: Whether the promotion collaborate with Mega, Macro, Nano, Micro influencer\n- Sales (in million)\n\nThis data can be used for simple tasks:\n- Data preprocessing\n- Exploratory Data Analysis\n- Visualization\n- Prediction using Linear Regression and Model Evaluation", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1206038, "CreatorUserId": 5459862, "OwnerUserId": 5459862.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2015190.0, "CurrentDatasourceVersionId": 2054779.0, "ForumId": 1224032, "Type": 2, "CreationDate": "03/12/2021 00:09:03", "LastActivityDate": "03/12/2021", "TotalViews": 65390, "TotalDownloads": 8681, "TotalVotes": 103, "TotalKernels": 35}]
[{"Id": 5459862, "UserName": "harrimansaragih", "DisplayName": "Harriman Samuel Saragih", "RegisterDate": "07/13/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## Student study Hours # df = pd.read_csv("/kaggle/input/student-study-hours/score.csv") df hours_val = df.Hours.values hours_val scores_val = df.Scores.values scores_val from matplotlib import pyplot as plt x = hours_val # x: independent variable y = scores_val # y : dependent variable plt.scatter(x, y, color="purple") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot x = x.reshape(-1, 1) x, len(x) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) plt.scatter(x_test, y_test, color="purple") plt.scatter(x_train, y_train, color="red") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict lr.score(x_test, y_test) * 100 plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_predict, color="green") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot # ## Dummy Marketing and Sales Data # df = pd.read_csv("/kaggle/input/dummy-advertising-and-sales-data/Dummy Data HSS.csv") df radio_val = df.Radio.values radio_val sales_val = df.Sales.values sales_val x = radio_val # x: independent variable y = sales_val # y : dependent variable plt.scatter(x, y, color="purple") plt.xlabel("radio") plt.ylabel("Sales") plt.plot x = x.reshape(-1, 1) x, len(x) x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) plt.scatter(x_test, y_test, color="red") plt.scatter(x_train, y_train, color="purple") plt.xlabel("Hours") plt.ylabel("Scores") plt.plot from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="mean") x_train = imputer.fit_transform(x_train) x_test = imputer.transform(x_test) y_train = imputer.fit_transform(y_train) lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict y_test = imputer.transform(y_test.reshape(-1, 1)).flatten() score = lr.score(x_test, y_test) * 100 score plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_predict, color="green") plt.xlabel("radio") plt.ylabel("Sales") plt.plot # ## Fuel Consumption 2000-2022 # df = pd.read_csv("/kaggle/input/fuel-consumption/Fuel_Consumption_2000-2022.csv") df fuel_consum_val = df["FUEL CONSUMPTION"].values fuel_consum_val emissions_val = df.EMISSIONS.values emissions_val from matplotlib import pyplot as plt x = fuel_consum_val # x: independent variable y = emissions_val # y : dependent variable plt.scatter(x, y, color="red") plt.xlabel("FUEL CONSUMPTION") plt.ylabel("EMISSIONS") plt.plot x = x.reshape(-1, 1) x, len(x) x_train, x_test, y_train, y_test = train_test_split( x, y, train_size=0.8, random_state=600 ) x_train, len(x_train) x_test, len(x_test) 
plt.scatter(x_train, y_train, color="red") plt.scatter(x_test, y_test, color="green") plt.xlabel("FUEL CONSUMPTION") plt.ylabel("EMISSIONS") plt.plot lr = LinearRegression() lr.fit(x_train, y_train) y_predict = lr.predict(x_test) y_predict lr.score(x_test, y_test) * 100
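The fuel-consumption section ends with a bare score. Error metrics in the target's own units are often easier to interpret; a sketch assuming the same path and column names as in the script above:

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split

# Path and columns as used in the script above (assumed to exist under /kaggle/input).
df = pd.read_csv("/kaggle/input/fuel-consumption/Fuel_Consumption_2000-2022.csv")
X = df[["FUEL CONSUMPTION"]].values
y = df["EMISSIONS"].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=600
)
lr = LinearRegression().fit(X_train, y_train)
pred = lr.predict(X_test)

# MAE and RMSE are in the same units as the EMISSIONS column, unlike R^2.
print(f"MAE : {mean_absolute_error(y_test, pred):.2f}")
print(f"RMSE: {np.sqrt(mean_squared_error(y_test, pred)):.2f}")
print(f"R^2 : {lr.score(X_test, y_test):.3f}")
```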
[{"dummy-advertising-and-sales-data/Dummy Data HSS.csv": {"column_names": "[\"TV\", \"Radio\", \"Social Media\", \"Influencer\", \"Sales\"]", "column_data_types": "{\"TV\": \"float64\", \"Radio\": \"float64\", \"Social Media\": \"float64\", \"Influencer\": \"object\", \"Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4572 entries, 0 to 4571\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 TV 4562 non-null float64\n 1 Radio 4568 non-null float64\n 2 Social Media 4566 non-null float64\n 3 Influencer 4572 non-null object \n 4 Sales 4566 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 178.7+ KB\n", "summary": "{\"TV\": {\"count\": 4562.0, \"mean\": 54.06685664182376, \"std\": 26.125053891841468, \"min\": 10.0, \"25%\": 32.0, \"50%\": 53.0, \"75%\": 77.0, \"max\": 100.0}, \"Radio\": {\"count\": 4568.0, \"mean\": 18.160355892662654, \"std\": 9.676958456095429, \"min\": 0.000683948, \"25%\": 10.5259572775, \"50%\": 17.85951293, \"75%\": 25.649729847499998, \"max\": 48.87116125}, \"Social Media\": {\"count\": 4566.0, \"mean\": 3.323956161385458, \"std\": 2.212670263921655, \"min\": 3.13e-05, \"25%\": 1.52784868575, \"50%\": 3.055565435, \"75%\": 4.807557994, \"max\": 13.98166208}, \"Sales\": {\"count\": 4566.0, \"mean\": 192.46660210662066, \"std\": 93.13309168784247, \"min\": 31.19940869, \"25%\": 112.322882475, \"50%\": 189.23117235, \"75%\": 272.50792167500003, \"max\": 364.0797515}}", "examples": "{\"TV\":{\"0\":16.0,\"1\":13.0,\"2\":41.0,\"3\":83.0},\"Radio\":{\"0\":6.566230788,\"1\":9.237764567,\"2\":15.88644602,\"3\":30.02002826},\"Social Media\":{\"0\":2.907982773,\"1\":2.409567204,\"2\":2.913410175,\"3\":6.922303959},\"Influencer\":{\"0\":\"Mega\",\"1\":\"Mega\",\"2\":\"Mega\",\"3\":\"Mega\"},\"Sales\":{\"0\":54.73275715,\"1\":46.67789698,\"2\":150.1778288,\"3\":298.2463398}}"}}]
true
3
<start_data_description><data_path>dummy-advertising-and-sales-data/Dummy Data HSS.csv: <column_names> ['TV', 'Radio', 'Social Media', 'Influencer', 'Sales'] <column_types> {'TV': 'float64', 'Radio': 'float64', 'Social Media': 'float64', 'Influencer': 'object', 'Sales': 'float64'} <dataframe_Summary> {'TV': {'count': 4562.0, 'mean': 54.06685664182376, 'std': 26.125053891841468, 'min': 10.0, '25%': 32.0, '50%': 53.0, '75%': 77.0, 'max': 100.0}, 'Radio': {'count': 4568.0, 'mean': 18.160355892662654, 'std': 9.676958456095429, 'min': 0.000683948, '25%': 10.5259572775, '50%': 17.85951293, '75%': 25.649729847499998, 'max': 48.87116125}, 'Social Media': {'count': 4566.0, 'mean': 3.323956161385458, 'std': 2.212670263921655, 'min': 3.13e-05, '25%': 1.52784868575, '50%': 3.055565435, '75%': 4.807557994, 'max': 13.98166208}, 'Sales': {'count': 4566.0, 'mean': 192.46660210662066, 'std': 93.13309168784247, 'min': 31.19940869, '25%': 112.322882475, '50%': 189.23117235, '75%': 272.50792167500003, 'max': 364.0797515}} <dataframe_info> RangeIndex: 4572 entries, 0 to 4571 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 TV 4562 non-null float64 1 Radio 4568 non-null float64 2 Social Media 4566 non-null float64 3 Influencer 4572 non-null object 4 Sales 4566 non-null float64 dtypes: float64(4), object(1) memory usage: 178.7+ KB <some_examples> {'TV': {'0': 16.0, '1': 13.0, '2': 41.0, '3': 83.0}, 'Radio': {'0': 6.566230788, '1': 9.237764567, '2': 15.88644602, '3': 30.02002826}, 'Social Media': {'0': 2.907982773, '1': 2.409567204, '2': 2.913410175, '3': 6.922303959}, 'Influencer': {'0': 'Mega', '1': 'Mega', '2': 'Mega', '3': 'Mega'}, 'Sales': {'0': 54.73275715, '1': 46.67789698, '2': 150.1778288, '3': 298.2463398}} <end_description>
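The description lists three budget columns plus a categorical `Influencer` tier, while the notebook regresses Sales on Radio alone. A hedged sketch of the multiple-regression extension, with mean imputation for the numeric budgets and one-hot encoding for `Influencer` (relative file path as in the description; this is not part of the original notebook):

```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

df = pd.read_csv("dummy-advertising-and-sales-data/Dummy Data HSS.csv")
df = df.dropna(subset=["Sales"])  # keep only rows whose target is present
X = df[["TV", "Radio", "Social Media", "Influencer"]]
y = df["Sales"]

# Numeric budgets: mean imputation; Influencer tier: one-hot encoding.
pre = ColumnTransformer(
    [
        ("num", SimpleImputer(strategy="mean"), ["TV", "Radio", "Social Media"]),
        ("cat", OneHotEncoder(handle_unknown="ignore"), ["Influencer"]),
    ]
)
model = make_pipeline(pre, LinearRegression())

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=600
)
model.fit(X_train, y_train)
print(f"R^2 with all channels: {model.score(X_test, y_test):.3f}")
```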
1,425
0
2,077
1,425
129561735
<jupyter_start><jupyter_text>Dubizzle used car sales data ## Dataset Description Dubizzle is the UAE'S (Middle east country) favorite marketplace to buy, sell and find anything. In this dataset I scrapped almost all data from Dubizzle related to automobile selling. This data can be used for finding interesting fact and correlation between different kind brands, resell value of a specific car related to year and more. Enjoy and explore. ## Summary - There are 20 columns and 9170 rows - Scrapped date 12/05/2022 ## Column Description 1. *title* - Vehicle name with model details 2. *price_in_aed* - Vehicle price in united arab emirates dhirham 3. *kilometer* - How many kilometer the vehicle travelled 4. *body_condition* - Body condition of vehicle 5. *mechanical_condition* - Mechanical condition of vehicle 6. *seller_type* - Type of seller ( Dealer, Owner, Other) 7. *body_type* - Body type ( SUV, Sedan, Other) 8. *no_of_cylinder* - Number of cylinder 9. *transmission_type* - Vehicle transmission type ( Automatic Transmission, Manual Transmission ) 10. *regional_spec* - Regional Specification of vehicle 11. *horsepower* - Horsepower 12. *fuel_type* - Fuel Type 13. *steering_side* - Steering side of the vehicle 14. *year* - Vehicle model year 15. *color* - Vehicle color 16. *emirates* - Emirates is like state 17. *motor_trim* - Motor trim type 18. *company* - Vehicle manufacture company name 19. *model* - Vehicle model 20. *date_posted* - Date of ad posted Kaggle dataset identifier: dubizzle-used-car-sale-data <jupyter_script>import pandas as pd import numpy as np import seaborn as sns import matplotlib import matplotlib.pyplot as plt from datasist.structdata import detect_outliers df = pd.read_csv( "/kaggle/input/dubizzle-used-car-sale-data/data.csv", na_values=["NoneUnknown", "Unknown"], ) df.head() df.shape # ## Cheack feature,data types and missing values df.info() # ## Cheack for duplicated data df.duplicated().sum() df.drop_duplicates(inplace=True) df.duplicated().sum() df.shape df.describe() # ## Cheack for Null values (df.isna().mean() * 100) # Prsentage of Null values intair the whole data (df.isna().mean() * 100).sum() # ## Handlin Null values from sklearn.impute import KNNImputer imputer = KNNImputer() df_numeric = df.select_dtypes(include=np.number) df_catt = df.select_dtypes(include="object_") df_numeric_Arr = imputer.fit_transform(df_numeric) df_numeric_Arr len(df_numeric_Arr) df_numeric = pd.DataFrame(df_numeric_Arr, columns=imputer.get_feature_names_out()) df_numeric.shape df_numeric.isna().sum() df_numeric.shape df_catt.isna().mean() * 100 df = pd.concat([df_catt.reset_index(), df_numeric.reset_index()], axis=1) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") df["horsepower"] = imputer.fit_transform(df[["horsepower"]]) df["no_of_cylinders"] = imputer.fit_transform(df[["no_of_cylinders"]]) df["motors_trim"] = imputer.fit_transform(df[["motors_trim"]]) df.isna().mean() * 100 # df.dropna(inplace=True) # lapel=df['price_in_aed'] # df=df.drop('price_in_aed',axis=1) # # EDA # ## univariate analysis # ## Handel price_in_aed >> Convert it to Int # ### I have a ploblen to convert price_in_aed to flaot beacouse of special character ',' # df['price_in_aed'].apply(lambda x: float(x)) ##df['price_in_aed'] = df['price_in_aed'].astype(float) def Price_in_aed_To_Int(price): num = "" price_List = price.split(",") for i in price_List: num += i return int(num) df["price_in_aed"] = df["price_in_aed"].apply(Price_in_aed_To_Int) df["price_in_aed"] ### First 
I will Handel feature that have maney values (Not unique) & That have contunuas values like kilometers df.title.value_counts() # title have model and company and I have both as individual feature so I will drop title df.drop("title", axis=1, inplace=True) # high_variance_columns=['body_condition', 'mechanical_condition', 'steering_side', 'fuel_type', 'transmission_type', 'emirate','motors_trim','model','date_posted'] # high_variance_columns # ## there is alot of features totallt inbalanced so I have to remove them # ##### body_condition, mechanical_condition, steering_side, fuel_type, transmission_type, emirate, motors_trim, model,date_posted## there is alot of features totallt inbalanced so I have to remove them # ##### body_condition, mechanical_condition, steering_side, fuel_type, transmission_type inbalanced_columns = [ "body_condition", "mechanical_condition", "steering_side", "fuel_type", "transmission_type", ] for x in inbalanced_columns: sns.countplot(data=df, y=df[x]) plt.figure() # this creates a new figure on which df.drop(inbalanced_columns, axis=1, inplace=True) df.drop("index", inplace=True, axis=1) df["color"].value_counts() sns.countplot(y=df["color"]) df.drop( df[ (df["color"] == "Purple") | (df["color"] == "Teal") | (df["color"] == "Tan") ].index, inplace=True, axis=0, ) sns.countplot(y=df["color"]) df.shape # ## I need to make some Bivariate Analysis between price_in_aed and kilometers To show if outliers will evect the corelation bentween them or not # befor removing outliers corr=-0.2 df.corr() # ### befor removing outliers corr=-0.2 & 0.25 # df['kilometers'] = df['kilometers'].astype(int) sns.kdeplot(df["kilometers"]) sns.boxenplot(df["kilometers"]) df[df["kilometers"] > 300000].index df.drop(df[df["kilometers"] > 300000].index, inplace=True, axis=0) df.shape sns.kdeplot(df["kilometers"]) sns.displot(data=df, x="kilometers") sns.boxplot(df["kilometers"]) # #### I concluded form her that ther is many cars has never used becouse the kilometers=0 df.describe() df["year"] = df["year"].astype(int) df.info() print(list(df["year"].value_counts())) sns.displot(df["year"]) sns.boxplot(df["year"]) df[df["year"] <= 2005] df.drop(df[df["year"] <= 2005].index, inplace=True, axis=0) sns.displot(df["year"]) sns.boxplot(df["year"]) df.corr() # ### after removing outliers corr=-0.4 &0.28 # ### so outliers have no meaning here # sns.countplot(df['year']) # ### Habdel body_type sns.countplot(data=df, y="body_type") def Body_Type_(body_type): if body_type == "SUV" or body_type == "Sedan" or body_type == "Coupe": return body_type else: return "Other" df["body_type"] = df["body_type"].apply(Body_Type_) sns.countplot(df["body_type"]) # ### Handel no_of_cylinders sns.countplot(data=df, x="no_of_cylinders") # #### Remove 3, 10, 5, None not_important_no_of_cylinders_idx = df[ (df["no_of_cylinders"] == "3") | (df["no_of_cylinders"] == "10") | (df["no_of_cylinders"] == "5") | (df["no_of_cylinders"] == "None") ].index df.drop(not_important_no_of_cylinders_idx, axis=0, inplace=True) df["no_of_cylinders"] = df["no_of_cylinders"].astype(int) sns.countplot(data=df, x="no_of_cylinders") # print(list(df['motors_trim'].unique())) df.columns col = [ "body_condition", "mechanical_condition", "seller_type", "body_type", "no_of_cylinders", "transmission_type", "regional_specs", "horsepower", "fuel_type", "steering_side", "color", "emirate", ] df.info() for x in df.columns: sns.countplot(data=df, y=df[x]) plt.figure() # this creates a new figure on which df["company"].value_counts().head(23).index 
df["company"].value_counts().sort_values().head(29) other_companies = df["company"].value_counts().sort_values().head(29).index other_companies = list(other_companies) def Handel_Companies(company): for c in range(len(other_companies)): if company == other_companies[c]: return "other" return company df["company"] = df["company"].apply(Handel_Companies) sns.countplot(data=df, y=df["company"]) df["company"].value_counts() # df.groupby('company').sum() df df.info() df df["horsepower"].value_counts() def Horsepower(horsepower): if ( horsepower == "700 - 800 HP" or horsepower == "800 - 900 HP" or horsepower == "900+ HP" ): return "More than 700" else: return horsepower df["horsepower"] = df["horsepower"].apply(Horsepower) df["horsepower"].value_counts() for col in df.columns: sns.countplot(data=df, y=df[col]) plt.figure() sns.displot(data=df, x=df["price_in_aed"]) sns.boxenplot(data=df, x=df["price_in_aed"]) df[df["price_in_aed"] > 1500000].index x = df.drop(df[df["price_in_aed"] > 1500000].index) sns.displot(data=x, x=x["price_in_aed"]) sns.displot(np.log(df["price_in_aed"])) df.describe() df.corr() x.corr() # df.drop('date_posted',inplace=True,axis=1) # ## Here we can descover when removin outliers from the target it incres the corr so outliers here have no sens df = x df df.describe() df.head(1) # # Bivariate Analysis # ## relatiob between sns.heatmap(df.corr(), annot=True) sns.scatterplot(x=df["price_in_aed"], y=df["year"]) sns.scatterplot(y=df["price_in_aed"], x=df["kilometers"]) sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"]) sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["year"]) sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["kilometers"]) sns.scatterplot(y=df["year"], x=df["kilometers"]) df.corr() df.drop("emirate", inplace=True, axis=1) df["motors_trim"].value_counts() # df.drop('motors_trim',inplace=True,axis=1) for col in df.columns: sns.countplot(data=df, y=df[col]) plt.figure() df.describe() df.head(2) df.reset_index() df.info() df.reset_index(inplace=True, drop=True) # # Lets Answer Business Questions # ## 1- most 10 companies have seles # ## 2- for most 10 companies have seles what is the common 5 models # ## 3- most 10 high price companies regardless othr factors # ## 4- most 10 high price companies in a category of 200-300 horsepower # ## 5- most 10 high price companies at duration of 2010-2015 # ## 6- Best selling body_type at GCC Specs (regional_specs) # ## 7- most 10 models have seles # ## 8- most 10 high price models and its cmopany # ## 9- most 10 high price models and its cmopany in a category of 200-500 horsepower # ## 10- most 10 models that spend 0 kilometers and its data # ## 11- most freq regional_specs that that cars spend 0 kilometers # ## 12- according to ouner what is most 2 body_types seles # ## 13- according to ouner what is most 10 models sales and its data # ## 14- according to dealer what is most 10 models sales and its data # ## 15- most 5 high price colors Under the average kilometers # ## 16- most 10 motors_trim have seles # ## 17- For each body_type for each seller_type which has most sales # ## 18- For each body_type for each no_of_cylinders which has most sales # ## 19- For each seller_type for each no_of_cylinders which has most sales # ## 20- For each seller_type for each horsepower which has most sales # ## 21- For each body_type for each horsepower which has most salesbody_type # ## 22- For each horsepower for each no_of_cylinders which has most salesbody_type # ## 23- For each 
seller_type how much kilometers driven # ## 24- For each seller_type how much kilometers driven in average # ## 25- For each body_type how much kilometers driven # ## 26- For each body_type how much kilometers driven in average # ## 27- For each seller_type for each body_type how much kilometers driven # ## 28- For each seller_type for each body_type how much kilometers driven in average # ## 29- For each body_type for each no_of_cylinders calculate the sum of kilometers driven # ## 30- For each body_type for each no_of_cylinders calculate the sum of kilometers driven in average # ## 1- most 10 companies have seles df["company"].value_counts().head(10) most_10_companies_idx = df["company"].value_counts().head(10).index most_10_companies_df = df[df["company"].isin(most_10_companies_idx)] most_10_companies_df sns.countplot( data=most_10_companies_df, y=most_10_companies_df["company"], order=most_10_companies_idx, ) # ## 2- for most 10 companies have seles what is the common 5 models # most_10_companies_df["model"].value_counts().head(5) most_5_models_in_most_10_companies_have_seles_idx = ( most_10_companies_df["model"].value_counts().head(5).index ) most_5_models_in_most_10_companies_have_seles_df = most_10_companies_df[ most_10_companies_df["model"].isin( most_5_models_in_most_10_companies_have_seles_idx ) ] most_5_models_in_most_10_companies_have_seles_df sns.countplot( data=most_5_models_in_most_10_companies_have_seles_df, x=most_5_models_in_most_10_companies_have_seles_df["model"], order=most_5_models_in_most_10_companies_have_seles_idx, ) # ## 3- most 10 high price companies regardless othr factors # df.groupby("company").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]] # ## 4- most 10 high price companies in a category of 200-300 horsepower # df[df["horsepower"] == "200 - 300 HP"].groupby("company").sum().sort_values( by="price_in_aed" ).head(10)[["price_in_aed"]] # ## 5- most 10 high price companies at duration of 2010-2015 # df[(df["year"] < 2015) & (df["year"] > 2010)].groupby("company").sum().sort_values( by="price_in_aed" ).head(10)[["price_in_aed"]] # ## 6- Best selling body_type at GCC Specs (regional_specs) # df[df["regional_specs"] == "GCC Specs"]["body_type"].value_counts().to_frame() # ## 7- most 10 models have seles # df["model"].value_counts().head(10).to_frame() most_10_models_have_seles_idx = df["model"].value_counts().head(10).index most_10_models_have_seles_df = df[df["model"].isin(most_10_models_have_seles_idx)] most_10_models_have_seles_df.head() sns.countplot( data=most_10_models_have_seles_df, y=most_10_models_have_seles_df["model"], order=most_10_models_have_seles_idx, ) # ## 8- most 10 high price models and its cmopany # df.groupby("model").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]] most_10_high_price_models_idx = ( df.groupby("model") .sum() .sort_values(by="price_in_aed") .head(10)[["price_in_aed"]] .index ) most_10_high_price_models_df = df[df["model"].isin(most_10_high_price_models_idx)] most_10_high_price_models_df[["model", "company", "price_in_aed"]] # ## 9- most 10 high price models and its cmopany in a category of 200-500 horsepower # df_200_500_HP = df[ (df["horsepower"] == "200 - 300 HP") | (df["horsepower"] == "400 - 500 HP") ] df_200_500_HP.groupby("model").sum().sort_values(by="price_in_aed").head(10)[ ["price_in_aed"] ] most_10_high_price_models_idx = ( df_200_500_HP.groupby("model") .sum() .sort_values(by="price_in_aed") .head(10)[["price_in_aed"]] .index ) most_10_high_price_models_df = df_200_500_HP[ 
df_200_500_HP["model"].isin(most_10_high_price_models_idx) ] most_10_high_price_models_df[["model", "company", "price_in_aed"]] # ## 10- most 10 models that spend 0 kilometers # most_10_models_that_spend_0_km_idx = ( df[df["kilometers"] == 0.0]["model"].value_counts().head(10).index ) df[df["kilometers"] == 0.0]["model"].value_counts().head(10) most_10_models_that_spend_0_km_df = df[ df["model"].isin(most_10_models_that_spend_0_km_idx) ] most_10_models_that_spend_0_km_df sns.countplot( data=most_10_models_that_spend_0_km_df, y=most_10_models_that_spend_0_km_df["model"], order=most_10_models_that_spend_0_km_idx, ) # ## 11- most freq regional_specs that that cars spend 0 kilometers # df[df["kilometers"] == 0.0]["regional_specs"].value_counts().to_frame() # ## 12- according to Owner what is most 2 body_types seles # df[df["seller_type"] == "Owner"]["body_type"].value_counts().head(2) # ## 13- according to Owner what is most 10 models seles and its data # df[df["seller_type"] == "Owner"]["model"].value_counts().head(10) owner_most_10_models_seles_idx = ( df[df["seller_type"] == "Owner"]["model"].value_counts().head(10).index ) owner_most_10_models_seles_df = df[df["model"].isin(owner_most_10_models_seles_idx)] owner_most_10_models_seles_df # ## 14- according to Dealer what is most 10 models seles and its data # df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10) Dealer_most_10_models_seles_idx = ( df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10).index ) Dealer_most_10_models_seles_df = df[df["model"].isin(Dealer_most_10_models_seles_idx)] Dealer_most_10_models_seles_df # ## 15- most 5 high price colors Under the average kilometers under_avg_km_df = df[df["kilometers"] < df["kilometers"].mean()] df.groupby("color").sum().sort_values(by="price_in_aed", ascending=False) # ## 16- most 10 motors_trim have seles df["motors_trim"].value_counts().head(10) most_10_motors_trim_have_seles_idx = df["motors_trim"].value_counts().head(10).index most_10_motors_trim_have_seles_df = df[ df["motors_trim"].isin(most_10_motors_trim_have_seles_idx) ] most_10_motors_trim_have_seles_df sns.countplot( data=most_10_motors_trim_have_seles_df, y=most_10_motors_trim_have_seles_df["motors_trim"], order=most_10_motors_trim_have_seles_idx, ) # ## 17- For each body_type for each seller_type which has most sales # df["dumy"] = 1 df.pivot_table(columns="seller_type", index="body_type", values="dumy", aggfunc=sum) sns.countplot(data=df, x="seller_type", hue="body_type") # ## 18- For each body_type for each no_of_cylinders which has most sales df.pivot_table(columns="body_type", index="no_of_cylinders", values="dumy", aggfunc=sum) sns.countplot(data=df, x="body_type", hue="no_of_cylinders") sns.countplot(df["no_of_cylinders"]) # ## 19- For each seller_type for each no_of_cylinders which has most sales df.pivot_table( columns="seller_type", index="no_of_cylinders", values="dumy", aggfunc=sum ) sns.countplot(data=df, x="seller_type", hue="no_of_cylinders") # ## 20- For each seller_type for each horsepower which has most sales # df.pivot_table(columns="seller_type", index="horsepower", values="dumy", aggfunc=sum) sns.countplot(data=df, x="seller_type", hue="horsepower") sns.countplot(y=df["horsepower"]) # ## 21- For each body_type for each horsepower which has most salesbody_type # df.pivot_table(columns="body_type", index="horsepower", values="dumy", aggfunc=sum) sns.countplot(data=df, x="body_type", hue="horsepower") sns.countplot(df["body_type"]) # ## 22- For each horsepower for each 
no_of_cylinders which has most salesbody_type # df.pivot_table( columns="horsepower", index="no_of_cylinders", values="dumy", aggfunc=sum ) sns.countplot(data=df, y="horsepower", hue="no_of_cylinders") sns.countplot(y=df["no_of_cylinders"]) # ## 23- For each seller_type how much kilometers driven df["seller_type"].value_counts().index sns.barplot(data=df, x="seller_type", y="kilometers", estimator=sum) df.groupby("seller_type").sum()[["kilometers"]] # ## 24- For each seller_type how much kilometers driven in average sns.barplot(data=df, x="seller_type", y="kilometers") df.groupby("seller_type").mean()[["kilometers"]] # conclulsion her is that owner type sell cars that drive more kilometers # ## 25- For each body_type how much kilometers driven sns.barplot(data=df, x="body_type", y="kilometers", estimator=sum) df.groupby("body_type").sum()[["kilometers"]] # ## 26- For each body_type how much kilometers driven in average sns.barplot(data=df, x="body_type", y="kilometers") df.groupby("body_type").mean()[["kilometers"]] df sns.barplot( data=df, x="seller_type", y="kilometers", hue=df["body_type"], estimator=sum ) df.pivot_table( columns="seller_type", index="body_type", values="kilometers", aggfunc=sum ) # ## 28- For each seller_type for each body_type how much kilometers driven in average sns.barplot(data=df, x="seller_type", y="kilometers", hue=df["body_type"]) df.pivot_table(columns="seller_type", index="body_type", values="kilometers") # ## 29- For each body_type for each no_of_cylinders calculate the sum of kilometers driven # sns.barplot( data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"], estimator=sum ) df.pivot_table( columns="body_type", index="no_of_cylinders", values="kilometers", aggfunc=sum ) # ## 30- For each body_type for each no_of_cylinders calculate the sum of kilometers driven in average # sns.barplot(data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"]) df.pivot_table(columns="body_type", index="no_of_cylinders", values="kilometers")
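One detail worth flagging in the aggregation cells above: `sort_values(by="price_in_aed").head(10)` sorts ascending, so the "most 10 high price" questions (3-5, 8, 9) actually return the ten lowest totals. A minimal corrected sketch, self-contained under the assumption that the raw file is read fresh and `price_in_aed` still carries thousands separators:

```python
import pandas as pd

# Same source file as the notebook (assumed path); strip the "," thousands separator.
df = pd.read_csv("/kaggle/input/dubizzle-used-car-sale-data/data.csv")
df["price_in_aed"] = pd.to_numeric(
    df["price_in_aed"].astype(str).str.replace(",", "", regex=False), errors="coerce"
)

# ascending=False makes head(10) the ten highest-grossing companies, not the lowest.
top10_total = (
    df.groupby("company")["price_in_aed"].sum().sort_values(ascending=False).head(10)
)
print(top10_total)

# Mean price per listing avoids rewarding brands that simply post more ads.
top10_mean = (
    df.groupby("company")["price_in_aed"].mean().sort_values(ascending=False).head(10)
)
```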
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561735.ipynb
dubizzle-used-car-sale-data
alihassankp
[{"Id": 129561735, "ScriptId": 38523534, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9201390, "CreationDate": "05/14/2023 21:13:23", "VersionNumber": 2.0, "Title": "dubizzle-used-car-Analysis", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 661.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 659.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185745985, "KernelVersionId": 129561735, "SourceDatasetVersionId": 4471258}]
[{"Id": 4471258, "DatasetId": 2616968, "DatasourceVersionId": 4531310, "CreatorUserId": 2304566, "LicenseName": "CC0: Public Domain", "CreationDate": "11/08/2022 16:09:01", "VersionNumber": 1.0, "Title": "Dubizzle used car sales data", "Slug": "dubizzle-used-car-sale-data", "Subtitle": "Dubizzle used car sales data it can be used for recommendation system", "Description": "## Dataset Description \n\nDubizzle is the UAE'S (Middle east country) favorite marketplace to buy, sell and find anything. In this dataset I scrapped almost all data from Dubizzle related to automobile selling. This data can be used for finding interesting fact and correlation between different kind brands, resell value of a specific car related to year and more. Enjoy and explore.\n\n## Summary\n- There are 20 columns and 9170 rows\n- Scrapped date 12/05/2022\n\n## Column Description\n1. *title* - Vehicle name with model details\n2. *price_in_aed* - Vehicle price in united arab emirates dhirham\n3. *kilometer* - How many kilometer the vehicle travelled \n4. *body_condition* - Body condition of vehicle \n5. *mechanical_condition* - Mechanical condition of vehicle\n6. *seller_type* - Type of seller ( Dealer, Owner, Other)\n7. *body_type* - Body type ( SUV, Sedan, Other)\n8. *no_of_cylinder* - Number of cylinder \n9. *transmission_type* - Vehicle transmission type ( Automatic Transmission, Manual Transmission )\n10. *regional_spec* - Regional Specification of vehicle\n11. *horsepower* - Horsepower\n12. *fuel_type* - Fuel Type \n13. *steering_side* - Steering side of the vehicle\n14. *year* - Vehicle model year\n15. *color* - Vehicle color\n16. *emirates* - Emirates is like state\n17. *motor_trim* - Motor trim type\n18. *company* - Vehicle manufacture company name\n19. *model* - Vehicle model\n20. *date_posted* - Date of ad posted", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2616968, "CreatorUserId": 2304566, "OwnerUserId": 2304566.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4471258.0, "CurrentDatasourceVersionId": 4531310.0, "ForumId": 2647821, "Type": 2, "CreationDate": "11/08/2022 16:09:01", "LastActivityDate": "11/08/2022", "TotalViews": 10323, "TotalDownloads": 1711, "TotalVotes": 33, "TotalKernels": 8}]
[{"Id": 2304566, "UserName": "alihassankp", "DisplayName": "Ali Hassan", "RegisterDate": "09/30/2018", "PerformanceTier": 1}]
import pandas as pd import numpy as np import seaborn as sns import matplotlib import matplotlib.pyplot as plt from datasist.structdata import detect_outliers df = pd.read_csv( "/kaggle/input/dubizzle-used-car-sale-data/data.csv", na_values=["NoneUnknown", "Unknown"], ) df.head() df.shape # ## Cheack feature,data types and missing values df.info() # ## Cheack for duplicated data df.duplicated().sum() df.drop_duplicates(inplace=True) df.duplicated().sum() df.shape df.describe() # ## Cheack for Null values (df.isna().mean() * 100) # Prsentage of Null values intair the whole data (df.isna().mean() * 100).sum() # ## Handlin Null values from sklearn.impute import KNNImputer imputer = KNNImputer() df_numeric = df.select_dtypes(include=np.number) df_catt = df.select_dtypes(include="object_") df_numeric_Arr = imputer.fit_transform(df_numeric) df_numeric_Arr len(df_numeric_Arr) df_numeric = pd.DataFrame(df_numeric_Arr, columns=imputer.get_feature_names_out()) df_numeric.shape df_numeric.isna().sum() df_numeric.shape df_catt.isna().mean() * 100 df = pd.concat([df_catt.reset_index(), df_numeric.reset_index()], axis=1) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") df["horsepower"] = imputer.fit_transform(df[["horsepower"]]) df["no_of_cylinders"] = imputer.fit_transform(df[["no_of_cylinders"]]) df["motors_trim"] = imputer.fit_transform(df[["motors_trim"]]) df.isna().mean() * 100 # df.dropna(inplace=True) # lapel=df['price_in_aed'] # df=df.drop('price_in_aed',axis=1) # # EDA # ## univariate analysis # ## Handel price_in_aed >> Convert it to Int # ### I have a ploblen to convert price_in_aed to flaot beacouse of special character ',' # df['price_in_aed'].apply(lambda x: float(x)) ##df['price_in_aed'] = df['price_in_aed'].astype(float) def Price_in_aed_To_Int(price): num = "" price_List = price.split(",") for i in price_List: num += i return int(num) df["price_in_aed"] = df["price_in_aed"].apply(Price_in_aed_To_Int) df["price_in_aed"] ### First I will Handel feature that have maney values (Not unique) & That have contunuas values like kilometers df.title.value_counts() # title have model and company and I have both as individual feature so I will drop title df.drop("title", axis=1, inplace=True) # high_variance_columns=['body_condition', 'mechanical_condition', 'steering_side', 'fuel_type', 'transmission_type', 'emirate','motors_trim','model','date_posted'] # high_variance_columns # ## there is alot of features totallt inbalanced so I have to remove them # ##### body_condition, mechanical_condition, steering_side, fuel_type, transmission_type, emirate, motors_trim, model,date_posted## there is alot of features totallt inbalanced so I have to remove them # ##### body_condition, mechanical_condition, steering_side, fuel_type, transmission_type inbalanced_columns = [ "body_condition", "mechanical_condition", "steering_side", "fuel_type", "transmission_type", ] for x in inbalanced_columns: sns.countplot(data=df, y=df[x]) plt.figure() # this creates a new figure on which df.drop(inbalanced_columns, axis=1, inplace=True) df.drop("index", inplace=True, axis=1) df["color"].value_counts() sns.countplot(y=df["color"]) df.drop( df[ (df["color"] == "Purple") | (df["color"] == "Teal") | (df["color"] == "Tan") ].index, inplace=True, axis=0, ) sns.countplot(y=df["color"]) df.shape # ## I need to make some Bivariate Analysis between price_in_aed and kilometers To show if outliers will evect the corelation bentween them or not # befor removing outliers corr=-0.2 df.corr() # ### 
befor removing outliers corr=-0.2 & 0.25 # df['kilometers'] = df['kilometers'].astype(int) sns.kdeplot(df["kilometers"]) sns.boxenplot(df["kilometers"]) df[df["kilometers"] > 300000].index df.drop(df[df["kilometers"] > 300000].index, inplace=True, axis=0) df.shape sns.kdeplot(df["kilometers"]) sns.displot(data=df, x="kilometers") sns.boxplot(df["kilometers"]) # #### I concluded form her that ther is many cars has never used becouse the kilometers=0 df.describe() df["year"] = df["year"].astype(int) df.info() print(list(df["year"].value_counts())) sns.displot(df["year"]) sns.boxplot(df["year"]) df[df["year"] <= 2005] df.drop(df[df["year"] <= 2005].index, inplace=True, axis=0) sns.displot(df["year"]) sns.boxplot(df["year"]) df.corr() # ### after removing outliers corr=-0.4 &0.28 # ### so outliers have no meaning here # sns.countplot(df['year']) # ### Habdel body_type sns.countplot(data=df, y="body_type") def Body_Type_(body_type): if body_type == "SUV" or body_type == "Sedan" or body_type == "Coupe": return body_type else: return "Other" df["body_type"] = df["body_type"].apply(Body_Type_) sns.countplot(df["body_type"]) # ### Handel no_of_cylinders sns.countplot(data=df, x="no_of_cylinders") # #### Remove 3, 10, 5, None not_important_no_of_cylinders_idx = df[ (df["no_of_cylinders"] == "3") | (df["no_of_cylinders"] == "10") | (df["no_of_cylinders"] == "5") | (df["no_of_cylinders"] == "None") ].index df.drop(not_important_no_of_cylinders_idx, axis=0, inplace=True) df["no_of_cylinders"] = df["no_of_cylinders"].astype(int) sns.countplot(data=df, x="no_of_cylinders") # print(list(df['motors_trim'].unique())) df.columns col = [ "body_condition", "mechanical_condition", "seller_type", "body_type", "no_of_cylinders", "transmission_type", "regional_specs", "horsepower", "fuel_type", "steering_side", "color", "emirate", ] df.info() for x in df.columns: sns.countplot(data=df, y=df[x]) plt.figure() # this creates a new figure on which df["company"].value_counts().head(23).index df["company"].value_counts().sort_values().head(29) other_companies = df["company"].value_counts().sort_values().head(29).index other_companies = list(other_companies) def Handel_Companies(company): for c in range(len(other_companies)): if company == other_companies[c]: return "other" return company df["company"] = df["company"].apply(Handel_Companies) sns.countplot(data=df, y=df["company"]) df["company"].value_counts() # df.groupby('company').sum() df df.info() df df["horsepower"].value_counts() def Horsepower(horsepower): if ( horsepower == "700 - 800 HP" or horsepower == "800 - 900 HP" or horsepower == "900+ HP" ): return "More than 700" else: return horsepower df["horsepower"] = df["horsepower"].apply(Horsepower) df["horsepower"].value_counts() for col in df.columns: sns.countplot(data=df, y=df[col]) plt.figure() sns.displot(data=df, x=df["price_in_aed"]) sns.boxenplot(data=df, x=df["price_in_aed"]) df[df["price_in_aed"] > 1500000].index x = df.drop(df[df["price_in_aed"] > 1500000].index) sns.displot(data=x, x=x["price_in_aed"]) sns.displot(np.log(df["price_in_aed"])) df.describe() df.corr() x.corr() # df.drop('date_posted',inplace=True,axis=1) # ## Here we can descover when removin outliers from the target it incres the corr so outliers here have no sens df = x df df.describe() df.head(1) # # Bivariate Analysis # ## relatiob between sns.heatmap(df.corr(), annot=True) sns.scatterplot(x=df["price_in_aed"], y=df["year"]) sns.scatterplot(y=df["price_in_aed"], x=df["kilometers"]) sns.scatterplot(x=df["price_in_aed"], 
y=df["no_of_cylinders"]) sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["year"]) sns.scatterplot(x=df["price_in_aed"], y=df["no_of_cylinders"], hue=df["kilometers"]) sns.scatterplot(y=df["year"], x=df["kilometers"]) df.corr() df.drop("emirate", inplace=True, axis=1) df["motors_trim"].value_counts() # df.drop('motors_trim',inplace=True,axis=1) for col in df.columns: sns.countplot(data=df, y=df[col]) plt.figure() df.describe() df.head(2) df.reset_index() df.info() df.reset_index(inplace=True, drop=True) # # Lets Answer Business Questions # ## 1- most 10 companies have seles # ## 2- for most 10 companies have seles what is the common 5 models # ## 3- most 10 high price companies regardless othr factors # ## 4- most 10 high price companies in a category of 200-300 horsepower # ## 5- most 10 high price companies at duration of 2010-2015 # ## 6- Best selling body_type at GCC Specs (regional_specs) # ## 7- most 10 models have seles # ## 8- most 10 high price models and its cmopany # ## 9- most 10 high price models and its cmopany in a category of 200-500 horsepower # ## 10- most 10 models that spend 0 kilometers and its data # ## 11- most freq regional_specs that that cars spend 0 kilometers # ## 12- according to ouner what is most 2 body_types seles # ## 13- according to ouner what is most 10 models sales and its data # ## 14- according to dealer what is most 10 models sales and its data # ## 15- most 5 high price colors Under the average kilometers # ## 16- most 10 motors_trim have seles # ## 17- For each body_type for each seller_type which has most sales # ## 18- For each body_type for each no_of_cylinders which has most sales # ## 19- For each seller_type for each no_of_cylinders which has most sales # ## 20- For each seller_type for each horsepower which has most sales # ## 21- For each body_type for each horsepower which has most salesbody_type # ## 22- For each horsepower for each no_of_cylinders which has most salesbody_type # ## 23- For each seller_type how much kilometers driven # ## 24- For each seller_type how much kilometers driven in average # ## 25- For each body_type how much kilometers driven # ## 26- For each body_type how much kilometers driven in average # ## 27- For each seller_type for each body_type how much kilometers driven # ## 28- For each seller_type for each body_type how much kilometers driven in average # ## 29- For each body_type for each no_of_cylinders calculate the sum of kilometers driven # ## 30- For each body_type for each no_of_cylinders calculate the sum of kilometers driven in average # ## 1- most 10 companies have seles df["company"].value_counts().head(10) most_10_companies_idx = df["company"].value_counts().head(10).index most_10_companies_df = df[df["company"].isin(most_10_companies_idx)] most_10_companies_df sns.countplot( data=most_10_companies_df, y=most_10_companies_df["company"], order=most_10_companies_idx, ) # ## 2- for most 10 companies have seles what is the common 5 models # most_10_companies_df["model"].value_counts().head(5) most_5_models_in_most_10_companies_have_seles_idx = ( most_10_companies_df["model"].value_counts().head(5).index ) most_5_models_in_most_10_companies_have_seles_df = most_10_companies_df[ most_10_companies_df["model"].isin( most_5_models_in_most_10_companies_have_seles_idx ) ] most_5_models_in_most_10_companies_have_seles_df sns.countplot( data=most_5_models_in_most_10_companies_have_seles_df, x=most_5_models_in_most_10_companies_have_seles_df["model"], 
order=most_5_models_in_most_10_companies_have_seles_idx, ) # ## 3- most 10 high price companies regardless othr factors # df.groupby("company").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]] # ## 4- most 10 high price companies in a category of 200-300 horsepower # df[df["horsepower"] == "200 - 300 HP"].groupby("company").sum().sort_values( by="price_in_aed" ).head(10)[["price_in_aed"]] # ## 5- most 10 high price companies at duration of 2010-2015 # df[(df["year"] < 2015) & (df["year"] > 2010)].groupby("company").sum().sort_values( by="price_in_aed" ).head(10)[["price_in_aed"]] # ## 6- Best selling body_type at GCC Specs (regional_specs) # df[df["regional_specs"] == "GCC Specs"]["body_type"].value_counts().to_frame() # ## 7- most 10 models have seles # df["model"].value_counts().head(10).to_frame() most_10_models_have_seles_idx = df["model"].value_counts().head(10).index most_10_models_have_seles_df = df[df["model"].isin(most_10_models_have_seles_idx)] most_10_models_have_seles_df.head() sns.countplot( data=most_10_models_have_seles_df, y=most_10_models_have_seles_df["model"], order=most_10_models_have_seles_idx, ) # ## 8- most 10 high price models and its cmopany # df.groupby("model").sum().sort_values(by="price_in_aed").head(10)[["price_in_aed"]] most_10_high_price_models_idx = ( df.groupby("model") .sum() .sort_values(by="price_in_aed") .head(10)[["price_in_aed"]] .index ) most_10_high_price_models_df = df[df["model"].isin(most_10_high_price_models_idx)] most_10_high_price_models_df[["model", "company", "price_in_aed"]] # ## 9- most 10 high price models and its cmopany in a category of 200-500 horsepower # df_200_500_HP = df[ (df["horsepower"] == "200 - 300 HP") | (df["horsepower"] == "400 - 500 HP") ] df_200_500_HP.groupby("model").sum().sort_values(by="price_in_aed").head(10)[ ["price_in_aed"] ] most_10_high_price_models_idx = ( df_200_500_HP.groupby("model") .sum() .sort_values(by="price_in_aed") .head(10)[["price_in_aed"]] .index ) most_10_high_price_models_df = df_200_500_HP[ df_200_500_HP["model"].isin(most_10_high_price_models_idx) ] most_10_high_price_models_df[["model", "company", "price_in_aed"]] # ## 10- most 10 models that spend 0 kilometers # most_10_models_that_spend_0_km_idx = ( df[df["kilometers"] == 0.0]["model"].value_counts().head(10).index ) df[df["kilometers"] == 0.0]["model"].value_counts().head(10) most_10_models_that_spend_0_km_df = df[ df["model"].isin(most_10_models_that_spend_0_km_idx) ] most_10_models_that_spend_0_km_df sns.countplot( data=most_10_models_that_spend_0_km_df, y=most_10_models_that_spend_0_km_df["model"], order=most_10_models_that_spend_0_km_idx, ) # ## 11- most freq regional_specs that that cars spend 0 kilometers # df[df["kilometers"] == 0.0]["regional_specs"].value_counts().to_frame() # ## 12- according to Owner what is most 2 body_types seles # df[df["seller_type"] == "Owner"]["body_type"].value_counts().head(2) # ## 13- according to Owner what is most 10 models seles and its data # df[df["seller_type"] == "Owner"]["model"].value_counts().head(10) owner_most_10_models_seles_idx = ( df[df["seller_type"] == "Owner"]["model"].value_counts().head(10).index ) owner_most_10_models_seles_df = df[df["model"].isin(owner_most_10_models_seles_idx)] owner_most_10_models_seles_df # ## 14- according to Dealer what is most 10 models seles and its data # df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10) Dealer_most_10_models_seles_idx = ( df[df["seller_type"] == "Dealer"]["model"].value_counts().head(10).index ) 
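# The business questions in this notebook keep repeating the same pattern: value_counts().head(N)
# to get the top-N index, isin() to filter the frame, then a countplot in that order.
# One possible way to factor that out is a small helper like the sketch below; the name
# plot_top_n and its arguments are illustrative (not from the original notebook) and it only
# assumes the df, sns and plt objects already defined above.
def plot_top_n(frame, column, n=10, mask=None):
    data = frame[mask] if mask is not None else frame
    top_idx = data[column].value_counts().head(n).index
    subset = data[data[column].isin(top_idx)]
    sns.countplot(data=subset, y=column, order=top_idx)
    plt.figure()
    return subset
# Example, equivalent to the Dealer analysis in question 14:
# plot_top_n(df, "model", n=10, mask=df["seller_type"] == "Dealer")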
Dealer_most_10_models_seles_df = df[df["model"].isin(Dealer_most_10_models_seles_idx)] Dealer_most_10_models_seles_df # ## 15- most 5 high price colors Under the average kilometers under_avg_km_df = df[df["kilometers"] < df["kilometers"].mean()] df.groupby("color").sum().sort_values(by="price_in_aed", ascending=False) # ## 16- most 10 motors_trim have seles df["motors_trim"].value_counts().head(10) most_10_motors_trim_have_seles_idx = df["motors_trim"].value_counts().head(10).index most_10_motors_trim_have_seles_df = df[ df["motors_trim"].isin(most_10_motors_trim_have_seles_idx) ] most_10_motors_trim_have_seles_df sns.countplot( data=most_10_motors_trim_have_seles_df, y=most_10_motors_trim_have_seles_df["motors_trim"], order=most_10_motors_trim_have_seles_idx, ) # ## 17- For each body_type for each seller_type which has most sales # df["dumy"] = 1 df.pivot_table(columns="seller_type", index="body_type", values="dumy", aggfunc=sum) sns.countplot(data=df, x="seller_type", hue="body_type") # ## 18- For each body_type for each no_of_cylinders which has most sales df.pivot_table(columns="body_type", index="no_of_cylinders", values="dumy", aggfunc=sum) sns.countplot(data=df, x="body_type", hue="no_of_cylinders") sns.countplot(df["no_of_cylinders"]) # ## 19- For each seller_type for each no_of_cylinders which has most sales df.pivot_table( columns="seller_type", index="no_of_cylinders", values="dumy", aggfunc=sum ) sns.countplot(data=df, x="seller_type", hue="no_of_cylinders") # ## 20- For each seller_type for each horsepower which has most sales # df.pivot_table(columns="seller_type", index="horsepower", values="dumy", aggfunc=sum) sns.countplot(data=df, x="seller_type", hue="horsepower") sns.countplot(y=df["horsepower"]) # ## 21- For each body_type for each horsepower which has most salesbody_type # df.pivot_table(columns="body_type", index="horsepower", values="dumy", aggfunc=sum) sns.countplot(data=df, x="body_type", hue="horsepower") sns.countplot(df["body_type"]) # ## 22- For each horsepower for each no_of_cylinders which has most salesbody_type # df.pivot_table( columns="horsepower", index="no_of_cylinders", values="dumy", aggfunc=sum ) sns.countplot(data=df, y="horsepower", hue="no_of_cylinders") sns.countplot(y=df["no_of_cylinders"]) # ## 23- For each seller_type how much kilometers driven df["seller_type"].value_counts().index sns.barplot(data=df, x="seller_type", y="kilometers", estimator=sum) df.groupby("seller_type").sum()[["kilometers"]] # ## 24- For each seller_type how much kilometers driven in average sns.barplot(data=df, x="seller_type", y="kilometers") df.groupby("seller_type").mean()[["kilometers"]] # conclulsion her is that owner type sell cars that drive more kilometers # ## 25- For each body_type how much kilometers driven sns.barplot(data=df, x="body_type", y="kilometers", estimator=sum) df.groupby("body_type").sum()[["kilometers"]] # ## 26- For each body_type how much kilometers driven in average sns.barplot(data=df, x="body_type", y="kilometers") df.groupby("body_type").mean()[["kilometers"]] df sns.barplot( data=df, x="seller_type", y="kilometers", hue=df["body_type"], estimator=sum ) df.pivot_table( columns="seller_type", index="body_type", values="kilometers", aggfunc=sum ) # ## 28- For each seller_type for each body_type how much kilometers driven in average sns.barplot(data=df, x="seller_type", y="kilometers", hue=df["body_type"]) df.pivot_table(columns="seller_type", index="body_type", values="kilometers") # ## 29- For each body_type for each no_of_cylinders 
calculate the sum of kilometers driven # sns.barplot( data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"], estimator=sum ) df.pivot_table( columns="body_type", index="no_of_cylinders", values="kilometers", aggfunc=sum ) # ## 30- For each body_type for each no_of_cylinders calculate the sum of kilometers driven in average # sns.barplot(data=df, x="body_type", y="kilometers", hue=df["no_of_cylinders"]) df.pivot_table(columns="body_type", index="no_of_cylinders", values="kilometers")
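For questions 17-22 the notebook adds a constant column (df["dumy"] = 1) only so that pivot_table with aggfunc=sum produces counts. If the goal is just a contingency table of counts, the same result can be obtained without the dummy column; the snippet below is a small sketch under that assumption, reusing the cleaned df built above.

import pandas as pd  # df is the cleaned frame built above

# Counts of listings per (body_type, seller_type) pair, no dummy column needed
counts = pd.crosstab(index=df["body_type"], columns=df["seller_type"])

# Equivalent pivot_table call, counting rows directly instead of summing a column of ones
counts_pt = df.pivot_table(index="body_type", columns="seller_type", aggfunc="size", fill_value=0)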
false
1
7,083
0
7,558
7,083
129561547
<jupyter_start><jupyter_text>Tetris Openers Dataset Kaggle dataset identifier: tetris-openers-dataset <jupyter_script># Data Science Classification Project: Tetris Opener Classification # ## Entire project is available at https://github.com/quickandsmart/tetris-opener-classifier import tensorflow as tf from tensorflow.keras import models, layers import matplotlib.pyplot as plt from IPython.display import HTML import numpy as np # Data Load: Load in and Resize Tetris Opener Images # **Manually collected different tetris openers and resized them to be 256x256. More information about the openers can be found in the README** BATCH_SIZE = 8 IMAGE_SIZE = 256 CHANNELS = 3 EPOCHS = 15 dataset = tf.keras.preprocessing.image_dataset_from_directory( "openers", seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, ) class_names = dataset.class_names class_names # ### Visualization of the first batch of images in our dataset for image_batch, labels_batch in dataset.take(1): for i in range(8): ax = plt.subplot(2, 4, i + 1) plt.imshow(image_batch[i].numpy().astype("uint8")) plt.title(class_names[labels_batch[i]]) plt.axis("off") # Train Validate Test Split the Data # **The dataset needs to be split into 3 categories to train the Neural Network.A large set of the data is needed for training the model, a validation set is needed to test the model while it's being trained, and lastly a test set to see how to model does after being fully trained. The current model was trained with an 60,20,20 split which lead to 10 out of 17 batches being trained, 3 out of 17 being validated, and 4 out of 17 being tested** def get_dataset_partitions_tf( ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000 ): assert (train_split + test_split + val_split) == 1 ds_size = len(ds) if shuffle: ds = ds.shuffle(shuffle_size, seed=12) train_size = int(train_split * ds_size) val_size = int(val_split * ds_size) train_ds = ds.take(train_size) val_ds = ds.skip(train_size).take(val_size) test_ds = ds.skip(train_size).skip(val_size) return train_ds, val_ds, test_ds train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset) len(train_ds), len(val_ds), len(test_ds) # **Cache and prefetch the datasets to make training the model faster and shuffle the datasets one more time** train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) # Building the Classification Model # ### Creating a Layer for Resizing and Normalization # **This first layer in the neural network is meant to resize and rescale any image that is put into the model to classify after the training is done** resize_and_rescale = tf.keras.Sequential( [ layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.experimental.preprocessing.Rescaling(1.0 / 255), ] ) # ### Data Augmentation # **Since I didn't have a lot of data to work with Data Augmentation is used to create more sample data to train with, which can help boost the accuracy of the model. 
The data augmentation included Flipping the image horizontally, adding contrast to the image by up to 40% and zooming in the image horizontally and vertically by up to 10%** data_augmentation = tf.keras.Sequential( [ layers.experimental.preprocessing.RandomFlip("horizontal"), layers.experimental.preprocessing.RandomContrast(0.4), layers.experimental.preprocessing.RandomZoom( height_factor=(0.2, 0.0), width_factor=(0.2, 0.0) ), ] ) augmented_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y)) train_ds_final = ( train_ds.concatenate(augmented_ds) .shuffle(buffer_size=10000) .prefetch(buffer_size=tf.data.AUTOTUNE) ) # **With this data augmentation, I have 2x more images to work with for our training set** len(train_ds_final) # Model Architecture # **I used a Convolutional Neural Network (CNN) since it works well with training images.This involves repeatedly using the convolution and maxpooling layers to shrink the image and create more recognized patterns** input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS) n_classes = 5 model = models.Sequential( [ resize_and_rescale, layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.Dense(64, activation="relu"), layers.Dense(n_classes, activation="softmax"), ] ) model.build(input_shape=input_shape) model.summary() model.compile( optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=["accuracy"], ) history = model.fit( train_ds_final, batch_size=BATCH_SIZE, validation_data=val_ds, verbose=1, epochs=15, ) scores = model.evaluate(test_ds) scores # **With this model I managed to achieve a 83.333% accuracy for the test dataset which is very good** # Run Predictions on Test Images import numpy as np plt.figure(figsize=(3, 3)) for images_batch, labels_batch in test_ds.take(1): first_image = images_batch[0].numpy().astype("uint8") first_label = labels_batch[0].numpy() plt.imshow(first_image) print("actual label:", class_names[first_label]) batch_prediction = model.predict(images_batch, verbose=0) print("predicted label:", class_names[np.argmax(batch_prediction[0])]) plt.axis("off") def predict(model, img): img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy()) img_array = tf.expand_dims(img_array, 0) predictions = model.predict(img_array, verbose=0) predicted_class = class_names[np.argmax(predictions[0])] confidence = round(100 * (np.max(predictions[0])), 2) return predicted_class, confidence plt.figure(figsize=(10, 10)) for images, labels in test_ds.take(1): for i in range(8): ax = plt.subplot(2, 4, i + 1) plt.imshow(images[i].numpy().astype("uint8")) predicted_class, confidence = predict(model, images[i].numpy()) actual_class = class_names[labels[i]] plt.title( f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%" ) plt.axis("off") # Save the Model # **Save the model, and whenever I want to make changes to test later I can have multiple models** import os model_version = max([int(i) for i in os.listdir("../models") + [0]]) + 1 model.save(f"../models/{model_version}")
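One maintenance note on the resizing and augmentation cells above: the layers.experimental.preprocessing namespace is deprecated in recent TensorFlow releases, where the same layers are exposed directly under tf.keras.layers. If the notebook is run on a newer TF version (the exact version used here is not stated), an otherwise equivalent formulation would look roughly like the sketch below.

import tensorflow as tf
from tensorflow.keras import layers

IMAGE_SIZE = 256  # same constant as above

# Resizing / rescaling stack using the non-experimental layer names
resize_and_rescale = tf.keras.Sequential(
    [
        layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        layers.Rescaling(1.0 / 255),
    ]
)

# Augmentation stack with the same flip, contrast and zoom settings as above
data_augmentation = tf.keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomContrast(0.4),
        layers.RandomZoom(height_factor=(0.2, 0.0), width_factor=(0.2, 0.0)),
    ]
)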
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561547.ipynb
tetris-openers-dataset
quickandsmart
[{"Id": 129561547, "ScriptId": 38525309, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13330521, "CreationDate": "05/14/2023 21:10:23", "VersionNumber": 1.0, "Title": "Tetris Opener Classification", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185745669, "KernelVersionId": 129561547, "SourceDatasetVersionId": 5685419}]
[{"Id": 5685419, "DatasetId": 3268605, "DatasourceVersionId": 5760996, "CreatorUserId": 13330521, "LicenseName": "Unknown", "CreationDate": "05/14/2023 21:02:03", "VersionNumber": 1.0, "Title": "Tetris Openers Dataset", "Slug": "tetris-openers-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3268605, "CreatorUserId": 13330521, "OwnerUserId": 13330521.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5685419.0, "CurrentDatasourceVersionId": 5760996.0, "ForumId": 3334223, "Type": 2, "CreationDate": "05/14/2023 21:02:03", "LastActivityDate": "05/14/2023", "TotalViews": 138, "TotalDownloads": 2, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 13330521, "UserName": "quickandsmart", "DisplayName": "Quickandsmart", "RegisterDate": "01/18/2023", "PerformanceTier": 0}]
# Data Science Classification Project: Tetris Opener Classification # ## Entire project is available at https://github.com/quickandsmart/tetris-opener-classifier import tensorflow as tf from tensorflow.keras import models, layers import matplotlib.pyplot as plt from IPython.display import HTML import numpy as np # Data Load: Load in and Resize Tetris Opener Images # **Manually collected different tetris openers and resized them to be 256x256. More information about the openers can be found in the README** BATCH_SIZE = 8 IMAGE_SIZE = 256 CHANNELS = 3 EPOCHS = 15 dataset = tf.keras.preprocessing.image_dataset_from_directory( "openers", seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, ) class_names = dataset.class_names class_names # ### Visualization of the first batch of images in our dataset for image_batch, labels_batch in dataset.take(1): for i in range(8): ax = plt.subplot(2, 4, i + 1) plt.imshow(image_batch[i].numpy().astype("uint8")) plt.title(class_names[labels_batch[i]]) plt.axis("off") # Train Validate Test Split the Data # **The dataset needs to be split into 3 categories to train the Neural Network.A large set of the data is needed for training the model, a validation set is needed to test the model while it's being trained, and lastly a test set to see how to model does after being fully trained. The current model was trained with an 60,20,20 split which lead to 10 out of 17 batches being trained, 3 out of 17 being validated, and 4 out of 17 being tested** def get_dataset_partitions_tf( ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000 ): assert (train_split + test_split + val_split) == 1 ds_size = len(ds) if shuffle: ds = ds.shuffle(shuffle_size, seed=12) train_size = int(train_split * ds_size) val_size = int(val_split * ds_size) train_ds = ds.take(train_size) val_ds = ds.skip(train_size).take(val_size) test_ds = ds.skip(train_size).skip(val_size) return train_ds, val_ds, test_ds train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset) len(train_ds), len(val_ds), len(test_ds) # **Cache and prefetch the datasets to make training the model faster and shuffle the datasets one more time** train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) # Building the Classification Model # ### Creating a Layer for Resizing and Normalization # **This first layer in the neural network is meant to resize and rescale any image that is put into the model to classify after the training is done** resize_and_rescale = tf.keras.Sequential( [ layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.experimental.preprocessing.Rescaling(1.0 / 255), ] ) # ### Data Augmentation # **Since I didn't have a lot of data to work with Data Augmentation is used to create more sample data to train with, which can help boost the accuracy of the model. 
The data augmentation included Flipping the image horizontally, adding contrast to the image by up to 40% and zooming in the image horizontally and vertically by up to 10%** data_augmentation = tf.keras.Sequential( [ layers.experimental.preprocessing.RandomFlip("horizontal"), layers.experimental.preprocessing.RandomContrast(0.4), layers.experimental.preprocessing.RandomZoom( height_factor=(0.2, 0.0), width_factor=(0.2, 0.0) ), ] ) augmented_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y)) train_ds_final = ( train_ds.concatenate(augmented_ds) .shuffle(buffer_size=10000) .prefetch(buffer_size=tf.data.AUTOTUNE) ) # **With this data augmentation, I have 2x more images to work with for our training set** len(train_ds_final) # Model Architecture # **I used a Convolutional Neural Network (CNN) since it works well with training images.This involves repeatedly using the convolution and maxpooling layers to shrink the image and create more recognized patterns** input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS) n_classes = 5 model = models.Sequential( [ resize_and_rescale, layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.Dense(64, activation="relu"), layers.Dense(n_classes, activation="softmax"), ] ) model.build(input_shape=input_shape) model.summary() model.compile( optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=["accuracy"], ) history = model.fit( train_ds_final, batch_size=BATCH_SIZE, validation_data=val_ds, verbose=1, epochs=15, ) scores = model.evaluate(test_ds) scores # **With this model I managed to achieve a 83.333% accuracy for the test dataset which is very good** # Run Predictions on Test Images import numpy as np plt.figure(figsize=(3, 3)) for images_batch, labels_batch in test_ds.take(1): first_image = images_batch[0].numpy().astype("uint8") first_label = labels_batch[0].numpy() plt.imshow(first_image) print("actual label:", class_names[first_label]) batch_prediction = model.predict(images_batch, verbose=0) print("predicted label:", class_names[np.argmax(batch_prediction[0])]) plt.axis("off") def predict(model, img): img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy()) img_array = tf.expand_dims(img_array, 0) predictions = model.predict(img_array, verbose=0) predicted_class = class_names[np.argmax(predictions[0])] confidence = round(100 * (np.max(predictions[0])), 2) return predicted_class, confidence plt.figure(figsize=(10, 10)) for images, labels in test_ds.take(1): for i in range(8): ax = plt.subplot(2, 4, i + 1) plt.imshow(images[i].numpy().astype("uint8")) predicted_class, confidence = predict(model, images[i].numpy()) actual_class = class_names[labels[i]] plt.title( f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%" ) plt.axis("off") # Save the Model # **Save the model, and whenever I want to make changes to test later I can have multiple models** import os model_version = max([int(i) for i in os.listdir("../models") + [0]]) + 1 model.save(f"../models/{model_version}")
false
0
2,153
0
2,178
2,153
129561844
# Code for ["Causal inference with Synthetic Control using Python and SparseSC"](https://aayushmnit.github.io/posts/2022-09-19-SyntheticControl/2022-09-19_SyntheticControl.html) blog. # ## What is Synthetic Control Method? # I will try to keep this part short and focus more on why Data scientists should care about such methods and how to use them on larger datasets based on practical experience using [SparseSC package](https://github.com/microsoft/SparseSC). # The Synthetic Control (SC) method is a statistical method used to estimate causal effects from binary treatments on observational panel (longitudinal) data. The method got quite a coverage by being described as [“the most important innovation in the policy evaluation literature in the last few years”](https://www.aeaweb.org/articles?id=10.1257/jep.31.2.3) and got an article published in [Washington Post - Seriously, here’s one amazing math trick to learn what can’t be known](https://www.washingtonpost.com/news/wonk/wp/2015/10/30/how-to-measure-things-in-a-world-of-competing-claims/). “SC is a technique to create an artificial control group by taking a weighted average of untreated units in such a way that it reproduces the characteristics of the treated units before the intervention(treatment). The SC acts as the counterfactual for a treatment unit, and the estimate of a treatment effect is the difference between the observed outcome in the post-treatment period and the SC's outcome.” # “One way to think of SC is as an improvement upon [difference-in-difference (DiD) estimation](https://en.wikipedia.org/wiki/Difference_in_differences). Typical DiD will compare a treated unit to the average of the control units. But often the treated unit does not look like a typical control (e.g., it might have a different growth rate), in which case the 'parallel trend' assumption of DiD is not valid. SC remedies this by choosing a smarter linear combination, rather than the simple average, to weigh more heavily the more similar units. SC's assumption is if there are endogenous factors that affect treatment and future outcomes then you should be able to control them by matching past outcomes. The matching that SC provides can therefore deal with some problems in estimation that DiD cannot handle.” # Here is the link to the Causal inference book which I found most useful to understand the math behind SC- [Causal Inference for The Brave and True by Matheus Facure - Chapter 15](https://matheusfacure.github.io/python-causality-handbook/15-Synthetic-Control.html). # ## Why should any Data scientist care about this method? # Often as a Data Scientist, you will encounter situations as follows where running A/B testing is not feasible because of - # 1. Lack of infrastructure # 2. Lack of similar groups for running A/B testing (in case of evaluation of state policies, as there is no state equivalent of other) # 3. Providing unwanted advantage to one group over others. Sometimes running an A/B test can give an unfair advantage and lead you into anti-trust territory. For example, what if Amazon tries to charge differential pricing for different customers or apply different margins for their sellers for the same product? # As a data scientist, stakeholders may still ask you to estimate the impact of certain changes/treatments, and Synthetic controls can come to the rescue in this situation. For this reason, it is a valuable tool to keep in your algorithmic toolkit. 
# ## Problem Overview # We will use the Proposition 99 data to explain the use case for this approach and also how to use the SparceSC library and its key features. “In 1988, California passed a famous Tobacco Tax and Health Protection Act, which became known as Proposition 99. Its primary effect is to impose a 25-cent per pack state excise tax on the sale of tobacco cigarettes within California, with approximately equivalent excise taxes similarly imposed on the retail sale of other commercial tobacco products, such as cigars and chewing tobacco. Additional restrictions placed on the sale of tobacco include a ban on cigarette vending machines in public areas accessible by juveniles, and a ban on the individual sale of single cigarettes. Revenue generated by the act was earmarked for various environmental and health care programs, and anti-tobacco advertisements. To evaluate its effect, we can gather data on cigarette sales from multiple states and across a number of years. In our case, we got data from the year 1970 to 2000 from 39 states.” import os install = '"git+https://github.com/microsoft/SparseSC.git"' os.system(f"pip install -Uqq {install}") import pandas as pd import numpy as np import SparseSC from datetime import datetime import warnings import plotly.express as px import plotly.graph_objects as pgo pd.set_option("display.max_columns", None) warnings.filterwarnings("ignore") # Let's look at the data # We have data per `state` as treatment unit and yearly (`year` column) per-capita sales of cigarettes in packs (`cigsale` column) and the cigarette retail price (`retprice` column). We are going to pivot this data so that each row is one treatment unit(`state`), and columns represent the yearly `cigsale` value. df_grp = pd.read_csv("/kaggle/working/in_grp_current.csv") df_industry = pd.read_csv("/kaggle/working/in_industry_production.csv") df_investments = pd.read_csv("/kaggle/working/in_investments.csv") df_fixed_assets = pd.read_csv("/kaggle/working/in_fixed_assets.csv") def prepare_df(df): df.index = df["region"].str.rstrip().str.lower() df.drop(columns="region", inplace=True) df.columns = df.columns.str.slice(stop=4).astype(int) for colname in df.columns: df[colname] = pd.to_numeric(df[colname], errors="coerce") df[colname] = df[colname].fillna(df[colname].dropna().mean()) return df df_grp = prepare_df(df_grp) df_industry = prepare_df(df_industry) df_investments = prepare_df(df_investments) df_fixed_assets = prepare_df(df_fixed_assets) df_fixed_assets.index wiki_url = "https://en.wikipedia.org/wiki/List_of_special_economic_zones_in_India" zones_df = pd.read_html(wiki_url)[2] zones_df["Notification Date"] = pd.to_datetime(zones_df["Notification Date"]) zones_df.groupby(by="DC Name")["Notification Date"].min() sezs_region_and_year = [ ("andhra pradesh", 2007), # Andhra Pradesh Special Economic Zone ("karnataka", 2007), # Mangalore Special Economic Zone ("tamil nadu", 2003), # Salem Special Economic Zone & MEPZ Special Economic Zone ("madhya pradesh", 2003), # Indore Special Economic Zone ("west bengal", 2003), # Falta Special Economic Zone ("uttar pradesh", 2003), # Noida Special Economic Zone ] LIST_OF_REGIONS_WITH_SEZS = [x for x, y in sezs_region_and_year] for rname in LIST_OF_REGIONS_WITH_SEZS: for df in [df_grp, df_industry, df_investments, df_fixed_assets]: if rname not in df.index.tolist(): print(f"{rname} is not present") print("finished") df_grp.head(3) df_industry.head(3) df_investments.head(3) df_fixed_assets.head(3) DATAFRAMES = { "grp": df_grp, "industry": 
df_industry, "investments": df_investments, "fixed_assets": df_fixed_assets, } from tqdm.notebook import tqdm def calculate_synths(current_region_name, current_year): for rname in LIST_OF_REGIONS_WITH_SEZS: if rname != current_region_name: idx_to_drop = [ val for val in list(DATAFRAMES.values())[0].index.values if val == rname ] synths = {} for df_name, df in tqdm(DATAFRAMES.items()): synth = SparseSC.fit( features=df.drop(idx_to_drop, axis=0) .iloc[:, df.columns <= current_year] .values, targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values, treated_units=[ idx for idx, val in enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ], progress=0, print_path=False, ) treated_units = [ idx for idx, val in enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ] result = df.loc[df.index == current_region_name].T.reset_index(drop=False) result.columns = ["year", "Observed"] result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[ treated_units, : ][0] synths[df_name] = {"synth": synth, "res_df": result} return synths synths_for_each_region = {} for current_region, current_year in sezs_region_and_year: print(f"Calculating for {current_region}") synths_for_each_region[current_region] = calculate_synths( current_region, current_year ) def vizualize(result: pd.DataFrame, region: str, variable: str, year: int): fig = px.line( data_frame=result, x="year", y=["Observed", "Synthetic"], template="plotly_dark", ) fig.add_trace( pgo.Scatter( x=[year, year], y=[ 0, result.Observed.max() * 1.02 if result.Observed.max() > result.Synthetic.max() else result.Synthetic.max() * 1.02, ], # y=[result.Observed.min()*0.98,result.Observed.max()*1.02], line={ "dash": "dash", }, name="SEZ creation", ) ) fig.update_layout( title={ "text": f"Synthetic Control Assessment for {region}", "y": 0.95, "x": 0.5, }, legend=dict(y=1, x=0.1, orientation="v"), legend_title="", xaxis_title="Year", yaxis_title=variable, font=dict(size=15), ) fig.show(renderer="notebook") print(sezs_region_and_year) vizualize( synths_for_each_region["tamil nadu"]["fixed_assets"]["res_df"], "Noida, Uttar Pradesh state", "Fixed assets", 2003, ) pd.set_option("display.float_format", lambda x: "%.3f" % x) # from sklearn.metrics import mean_absolute_percentage_error as MAPE from sklearn.metrics import mean_absolute_error as MAE print(sezs_region_and_year) df = synths_for_each_region["uttar pradesh"]["fixed_assets"]["res_df"] treatment_year = 2003 mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) pd.DataFrame( {"Pre": [mae_pre], "Post": [mae_post], "Post/Pre": [mae_post / mae_pre]}, index=["MAE"], ) def calculate_synths_for_placebo(current_region_name, current_year): """ basically the same as calculate_synth(), but we drop all the regions, and fit() replaced with fit_fast() so we will spend less time and the estimations does not have to be all that accurate """ for rname in LIST_OF_REGIONS_WITH_SEZS: idx_to_drop = [ val for val in list(DATAFRAMES.values())[0].index.values if val == rname ] synths = {} for df_name, df in tqdm(DATAFRAMES.items()): try: synth = SparseSC.fit_fast( features=df.drop(idx_to_drop, axis=0) .iloc[:, df.columns <= current_year] .values, targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values, treated_units=[ idx for idx, val in 
enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ], progress=0, print_path=False, ) treated_units = [ idx for idx, val in enumerate(df.index.values) if val == current_region_name ] result = df.loc[df.index == current_region_name].T.reset_index(drop=False) result.columns = ["year", "Observed"] result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[ treated_units, : ][0] synths[df_name] = {"synth": synth, "res_df": result} # LinAlgError: Matrix is singular raises for fixed_assets so i just dont calculate for it except Exception as e: print(f"{e} occurred for {df_name}") return synths synths_for_placebo = {} for i in range(4): current_region = df_grp.index.values[np.random.randint(0, len(df_grp))] current_year = 2011 while current_region in LIST_OF_REGIONS_WITH_SEZS: current_region = df_grp.index.values[np.random.randint(0, len(df_grp))] print(f"Calculating for {current_region}") synths_for_placebo[current_region] = calculate_synths_for_placebo( current_region, current_year ) _ = list(synths_for_placebo.keys())[0] for variable in synths_for_placebo[_].keys(): for rname in synths_for_each_region.keys(): df = synths_for_each_region[rname][variable]["res_df"] treatment_year = [y for r, y in sezs_region_and_year if r == rname][0] mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) rel = mae_post / mae_pre synths_for_each_region[rname][variable]["mae_pre"] = mae_pre synths_for_each_region[rname][variable]["mae_post"] = mae_post synths_for_each_region[rname][variable]["rel"] = rel for rname in synths_for_placebo.keys(): df = synths_for_placebo[rname][variable]["res_df"] treatment_year = 2011 mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) rel = mae_post / mae_pre synths_for_placebo[rname][variable]["mae_pre"] = mae_pre synths_for_placebo[rname][variable]["mae_post"] = mae_post synths_for_placebo[rname][variable]["rel"] = rel import matplotlib.pyplot as plt _ = list(synths_for_placebo.keys())[0] post_pre_relations = {} for variable in synths_for_placebo[_].keys(): x = [] heigth = [] for rname in synths_for_each_region.keys(): x.append(rname) heigth.append(synths_for_each_region[rname][variable]["rel"]) for rname in synths_for_placebo.keys(): heigth.append(synths_for_placebo[rname][variable]["rel"]) x.append(rname) post_pre_relations[variable] = {"x": x, "height": heigth} colors = ["cyan" for _ in range(len(post_pre_relations["grp"]["x"]) - 4)] colors.extend(["green"] * 4) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["grp"]["x"], height=post_pre_relations["grp"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for GRP", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["investments"]["x"], 
height=post_pre_relations["investments"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for Direct investments", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["fixed_assets"]["x"], height=post_pre_relations["fixed_assets"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for Fixed assets", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) result = {"treated": synths_for_each_region, "placebo": synths_for_placebo} with open("bullshit_for_india.txt", "w") as f: for rname, year in sezs_region_and_year: print(rname, year) f.write(rname) f.write("\n") for variable in tqdm(synths_for_each_region[rname].keys()): f.write(variable) f.write("\n") if variable == "grp": colnames = np.arange(1990, 2022) else: colnames = np.arange(1990, 2020) df = pd.DataFrame( np.hstack( ( synths_for_each_region[rname][variable]["synth"].features, synths_for_each_region[rname][variable]["synth"].targets, ) ), columns=colnames, ) year = year ## Creating unit treatment_periods unit_treatment_periods = np.full((df.values.shape[0]), np.nan) unit_treatment_periods[ synths_for_each_region[rname][variable]["synth"].treated_units ] = [idx for idx, colname in enumerate(df.columns) if colname > year][0] try: ## fitting estimate effects method sc = SparseSC.estimate_effects( outcomes=df.values, unit_treatment_periods=unit_treatment_periods, max_n_pl=50, # Number of placebos level=0.9, # Level for confidence intervals ) f.write(str(sc)) f.write("\n\n") f.write( f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \ with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}" ) f.write("\n\n") except Exception as e: print(f"{e} occured for {rname}, {variable}") print( f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \ with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}" ) for synth_group in result.keys(): for rname in result[synth_group].keys(): for variable in result[synth_group][rname].keys(): result[synth_group][rname][variable]["synth"] = { "weights": result[synth_group][rname][variable]["synth"].get_weights( True ), "v_matrix": result[synth_group][rname][variable]["synth"].V, "v_pen": result[synth_group][rname][variable]["synth"].fitted_v_pen, "w_pen": result[synth_group][rname][variable]["synth"].fitted_w_pen, } result[synth_group][rname][variable]["res_df"] = result[synth_group][rname][ variable ]["res_df"].to_dict() import json class NumpyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) with open("result_in.json", "w") as outfile: json.dump(result, outfile, indent=4, cls=NumpyEncoder)
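The bar charts above compare the treated regions' MAE post/pre ratios with the placebo regions'. A common way to summarize that comparison is a placebo-based pseudo p-value: the share of placebo units whose ratio is at least as large as the treated unit's. The helper below is a small sketch reusing the dictionaries built above (synths_for_each_region, synths_for_placebo); with only four placebo regions the resulting values are necessarily very coarse.

import numpy as np


def placebo_p_value(region, variable):
    treated_rel = synths_for_each_region[region][variable]["rel"]
    # some placebo regions may be missing a variable (e.g. when fit_fast failed), so skip them
    placebo_rels = np.array(
        [
            synths_for_placebo[r][variable]["rel"]
            for r in synths_for_placebo
            if variable in synths_for_placebo[r]
        ]
    )
    if placebo_rels.size == 0:
        return np.nan
    return float(np.mean(placebo_rels >= treated_rel))


# e.g. placebo_p_value("uttar pradesh", "grp")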
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561844.ipynb
null
null
[{"Id": 129561844, "ScriptId": 38457222, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10269757, "CreationDate": "05/14/2023 21:15:06", "VersionNumber": 1.0, "Title": "Synthetic Control using SparseSC for India SEZ", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 390.0, "LinesInsertedFromPrevious": 244.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": 244.0, "LinesDeletedFromFork": 89.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 146.0, "TotalVotes": 0}]
null
null
null
null
# Code for ["Causal inference with Synthetic Control using Python and SparseSC"](https://aayushmnit.github.io/posts/2022-09-19-SyntheticControl/2022-09-19_SyntheticControl.html) blog. # ## What is Synthetic Control Method? # I will try to keep this part short and focus more on why Data scientists should care about such methods and how to use them on larger datasets based on practical experience using [SparseSC package](https://github.com/microsoft/SparseSC). # The Synthetic Control (SC) method is a statistical method used to estimate causal effects from binary treatments on observational panel (longitudinal) data. The method got quite a coverage by being described as [“the most important innovation in the policy evaluation literature in the last few years”](https://www.aeaweb.org/articles?id=10.1257/jep.31.2.3) and got an article published in [Washington Post - Seriously, here’s one amazing math trick to learn what can’t be known](https://www.washingtonpost.com/news/wonk/wp/2015/10/30/how-to-measure-things-in-a-world-of-competing-claims/). “SC is a technique to create an artificial control group by taking a weighted average of untreated units in such a way that it reproduces the characteristics of the treated units before the intervention(treatment). The SC acts as the counterfactual for a treatment unit, and the estimate of a treatment effect is the difference between the observed outcome in the post-treatment period and the SC's outcome.” # “One way to think of SC is as an improvement upon [difference-in-difference (DiD) estimation](https://en.wikipedia.org/wiki/Difference_in_differences). Typical DiD will compare a treated unit to the average of the control units. But often the treated unit does not look like a typical control (e.g., it might have a different growth rate), in which case the 'parallel trend' assumption of DiD is not valid. SC remedies this by choosing a smarter linear combination, rather than the simple average, to weigh more heavily the more similar units. SC's assumption is if there are endogenous factors that affect treatment and future outcomes then you should be able to control them by matching past outcomes. The matching that SC provides can therefore deal with some problems in estimation that DiD cannot handle.” # Here is the link to the Causal inference book which I found most useful to understand the math behind SC- [Causal Inference for The Brave and True by Matheus Facure - Chapter 15](https://matheusfacure.github.io/python-causality-handbook/15-Synthetic-Control.html). # ## Why should any Data scientist care about this method? # Often as a Data Scientist, you will encounter situations as follows where running A/B testing is not feasible because of - # 1. Lack of infrastructure # 2. Lack of similar groups for running A/B testing (in case of evaluation of state policies, as there is no state equivalent of other) # 3. Providing unwanted advantage to one group over others. Sometimes running an A/B test can give an unfair advantage and lead you into anti-trust territory. For example, what if Amazon tries to charge differential pricing for different customers or apply different margins for their sellers for the same product? # As a data scientist, stakeholders may still ask you to estimate the impact of certain changes/treatments, and Synthetic controls can come to the rescue in this situation. For this reason, it is a valuable tool to keep in your algorithmic toolkit. 
# ## Problem Overview # We will use the Proposition 99 data to explain the use case for this approach and also how to use the SparceSC library and its key features. “In 1988, California passed a famous Tobacco Tax and Health Protection Act, which became known as Proposition 99. Its primary effect is to impose a 25-cent per pack state excise tax on the sale of tobacco cigarettes within California, with approximately equivalent excise taxes similarly imposed on the retail sale of other commercial tobacco products, such as cigars and chewing tobacco. Additional restrictions placed on the sale of tobacco include a ban on cigarette vending machines in public areas accessible by juveniles, and a ban on the individual sale of single cigarettes. Revenue generated by the act was earmarked for various environmental and health care programs, and anti-tobacco advertisements. To evaluate its effect, we can gather data on cigarette sales from multiple states and across a number of years. In our case, we got data from the year 1970 to 2000 from 39 states.” import os install = '"git+https://github.com/microsoft/SparseSC.git"' os.system(f"pip install -Uqq {install}") import pandas as pd import numpy as np import SparseSC from datetime import datetime import warnings import plotly.express as px import plotly.graph_objects as pgo pd.set_option("display.max_columns", None) warnings.filterwarnings("ignore") # Let's look at the data # We have data per `state` as treatment unit and yearly (`year` column) per-capita sales of cigarettes in packs (`cigsale` column) and the cigarette retail price (`retprice` column). We are going to pivot this data so that each row is one treatment unit(`state`), and columns represent the yearly `cigsale` value. df_grp = pd.read_csv("/kaggle/working/in_grp_current.csv") df_industry = pd.read_csv("/kaggle/working/in_industry_production.csv") df_investments = pd.read_csv("/kaggle/working/in_investments.csv") df_fixed_assets = pd.read_csv("/kaggle/working/in_fixed_assets.csv") def prepare_df(df): df.index = df["region"].str.rstrip().str.lower() df.drop(columns="region", inplace=True) df.columns = df.columns.str.slice(stop=4).astype(int) for colname in df.columns: df[colname] = pd.to_numeric(df[colname], errors="coerce") df[colname] = df[colname].fillna(df[colname].dropna().mean()) return df df_grp = prepare_df(df_grp) df_industry = prepare_df(df_industry) df_investments = prepare_df(df_investments) df_fixed_assets = prepare_df(df_fixed_assets) df_fixed_assets.index wiki_url = "https://en.wikipedia.org/wiki/List_of_special_economic_zones_in_India" zones_df = pd.read_html(wiki_url)[2] zones_df["Notification Date"] = pd.to_datetime(zones_df["Notification Date"]) zones_df.groupby(by="DC Name")["Notification Date"].min() sezs_region_and_year = [ ("andhra pradesh", 2007), # Andhra Pradesh Special Economic Zone ("karnataka", 2007), # Mangalore Special Economic Zone ("tamil nadu", 2003), # Salem Special Economic Zone & MEPZ Special Economic Zone ("madhya pradesh", 2003), # Indore Special Economic Zone ("west bengal", 2003), # Falta Special Economic Zone ("uttar pradesh", 2003), # Noida Special Economic Zone ] LIST_OF_REGIONS_WITH_SEZS = [x for x, y in sezs_region_and_year] for rname in LIST_OF_REGIONS_WITH_SEZS: for df in [df_grp, df_industry, df_investments, df_fixed_assets]: if rname not in df.index.tolist(): print(f"{rname} is not present") print("finished") df_grp.head(3) df_industry.head(3) df_investments.head(3) df_fixed_assets.head(3) DATAFRAMES = { "grp": df_grp, "industry": 
df_industry, "investments": df_investments, "fixed_assets": df_fixed_assets, } from tqdm.notebook import tqdm def calculate_synths(current_region_name, current_year): for rname in LIST_OF_REGIONS_WITH_SEZS: if rname != current_region_name: idx_to_drop = [ val for val in list(DATAFRAMES.values())[0].index.values if val == rname ] synths = {} for df_name, df in tqdm(DATAFRAMES.items()): synth = SparseSC.fit( features=df.drop(idx_to_drop, axis=0) .iloc[:, df.columns <= current_year] .values, targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values, treated_units=[ idx for idx, val in enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ], progress=0, print_path=False, ) treated_units = [ idx for idx, val in enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ] result = df.loc[df.index == current_region_name].T.reset_index(drop=False) result.columns = ["year", "Observed"] result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[ treated_units, : ][0] synths[df_name] = {"synth": synth, "res_df": result} return synths synths_for_each_region = {} for current_region, current_year in sezs_region_and_year: print(f"Calculating for {current_region}") synths_for_each_region[current_region] = calculate_synths( current_region, current_year ) def vizualize(result: pd.DataFrame, region: str, variable: str, year: int): fig = px.line( data_frame=result, x="year", y=["Observed", "Synthetic"], template="plotly_dark", ) fig.add_trace( pgo.Scatter( x=[year, year], y=[ 0, result.Observed.max() * 1.02 if result.Observed.max() > result.Synthetic.max() else result.Synthetic.max() * 1.02, ], # y=[result.Observed.min()*0.98,result.Observed.max()*1.02], line={ "dash": "dash", }, name="SEZ creation", ) ) fig.update_layout( title={ "text": f"Synthetic Control Assessment for {region}", "y": 0.95, "x": 0.5, }, legend=dict(y=1, x=0.1, orientation="v"), legend_title="", xaxis_title="Year", yaxis_title=variable, font=dict(size=15), ) fig.show(renderer="notebook") print(sezs_region_and_year) vizualize( synths_for_each_region["tamil nadu"]["fixed_assets"]["res_df"], "Noida, Uttar Pradesh state", "Fixed assets", 2003, ) pd.set_option("display.float_format", lambda x: "%.3f" % x) # from sklearn.metrics import mean_absolute_percentage_error as MAPE from sklearn.metrics import mean_absolute_error as MAE print(sezs_region_and_year) df = synths_for_each_region["uttar pradesh"]["fixed_assets"]["res_df"] treatment_year = 2003 mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) pd.DataFrame( {"Pre": [mae_pre], "Post": [mae_post], "Post/Pre": [mae_post / mae_pre]}, index=["MAE"], ) def calculate_synths_for_placebo(current_region_name, current_year): """ basically the same as calculate_synth(), but we drop all the regions, and fit() replaced with fit_fast() so we will spend less time and the estimations does not have to be all that accurate """ for rname in LIST_OF_REGIONS_WITH_SEZS: idx_to_drop = [ val for val in list(DATAFRAMES.values())[0].index.values if val == rname ] synths = {} for df_name, df in tqdm(DATAFRAMES.items()): try: synth = SparseSC.fit_fast( features=df.drop(idx_to_drop, axis=0) .iloc[:, df.columns <= current_year] .values, targets=df.drop(idx_to_drop).iloc[:, df.columns > current_year].values, treated_units=[ idx for idx, val in 
enumerate(df.drop(idx_to_drop).index.values) if val == current_region_name ], progress=0, print_path=False, ) treated_units = [ idx for idx, val in enumerate(df.index.values) if val == current_region_name ] result = df.loc[df.index == current_region_name].T.reset_index(drop=False) result.columns = ["year", "Observed"] result["Synthetic"] = synth.predict(df.drop(idx_to_drop, axis=0).values)[ treated_units, : ][0] synths[df_name] = {"synth": synth, "res_df": result} # LinAlgError: Matrix is singular raises for fixed_assets so i just dont calculate for it except Exception as e: print(f"{e} occurred for {df_name}") return synths synths_for_placebo = {} for i in range(4): current_region = df_grp.index.values[np.random.randint(0, len(df_grp))] current_year = 2011 while current_region in LIST_OF_REGIONS_WITH_SEZS: current_region = df_grp.index.values[np.random.randint(0, len(df_grp))] print(f"Calculating for {current_region}") synths_for_placebo[current_region] = calculate_synths_for_placebo( current_region, current_year ) _ = list(synths_for_placebo.keys())[0] for variable in synths_for_placebo[_].keys(): for rname in synths_for_each_region.keys(): df = synths_for_each_region[rname][variable]["res_df"] treatment_year = [y for r, y in sezs_region_and_year if r == rname][0] mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) rel = mae_post / mae_pre synths_for_each_region[rname][variable]["mae_pre"] = mae_pre synths_for_each_region[rname][variable]["mae_post"] = mae_post synths_for_each_region[rname][variable]["rel"] = rel for rname in synths_for_placebo.keys(): df = synths_for_placebo[rname][variable]["res_df"] treatment_year = 2011 mae_pre = MAE( df[df["year"] <= treatment_year]["Observed"].values, df[df["year"] <= treatment_year]["Synthetic"].values, ) mae_post = MAE( df[df["year"] > treatment_year]["Observed"].values, df[df["year"] > treatment_year]["Synthetic"].values, ) rel = mae_post / mae_pre synths_for_placebo[rname][variable]["mae_pre"] = mae_pre synths_for_placebo[rname][variable]["mae_post"] = mae_post synths_for_placebo[rname][variable]["rel"] = rel import matplotlib.pyplot as plt _ = list(synths_for_placebo.keys())[0] post_pre_relations = {} for variable in synths_for_placebo[_].keys(): x = [] heigth = [] for rname in synths_for_each_region.keys(): x.append(rname) heigth.append(synths_for_each_region[rname][variable]["rel"]) for rname in synths_for_placebo.keys(): heigth.append(synths_for_placebo[rname][variable]["rel"]) x.append(rname) post_pre_relations[variable] = {"x": x, "height": heigth} colors = ["cyan" for _ in range(len(post_pre_relations["grp"]["x"]) - 4)] colors.extend(["green"] * 4) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["grp"]["x"], height=post_pre_relations["grp"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for GRP", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["investments"]["x"], 
height=post_pre_relations["investments"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for Direct investments", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) with plt.style.context("dark_background"): plt.figure(figsize=(20, 8)) plt.bar( x=post_pre_relations["fixed_assets"]["x"], height=post_pre_relations["fixed_assets"]["height"], color=colors, ) plt.xticks(rotation=30, ha="right", fontsize=19) legend_elements = [ plt.Line2D( [0], [0], marker="o", color="w", label="Treated units", markerfacecolor="cyan", markersize=15, ), plt.Line2D( [0], [0], marker="o", color="w", label="Placebo units", markerfacecolor="green", markersize=15, ), ] plt.title("MAE Post/Pre relation for Fixed assets", fontsize=23) plt.legend(handles=legend_elements, loc="upper right", fontsize=19) result = {"treated": synths_for_each_region, "placebo": synths_for_placebo} with open("bullshit_for_india.txt", "w") as f: for rname, year in sezs_region_and_year: print(rname, year) f.write(rname) f.write("\n") for variable in tqdm(synths_for_each_region[rname].keys()): f.write(variable) f.write("\n") if variable == "grp": colnames = np.arange(1990, 2022) else: colnames = np.arange(1990, 2020) df = pd.DataFrame( np.hstack( ( synths_for_each_region[rname][variable]["synth"].features, synths_for_each_region[rname][variable]["synth"].targets, ) ), columns=colnames, ) year = year ## Creating unit treatment_periods unit_treatment_periods = np.full((df.values.shape[0]), np.nan) unit_treatment_periods[ synths_for_each_region[rname][variable]["synth"].treated_units ] = [idx for idx, colname in enumerate(df.columns) if colname > year][0] try: ## fitting estimate effects method sc = SparseSC.estimate_effects( outcomes=df.values, unit_treatment_periods=unit_treatment_periods, max_n_pl=50, # Number of placebos level=0.9, # Level for confidence intervals ) f.write(str(sc)) f.write("\n\n") f.write( f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \ with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}" ) f.write("\n\n") except Exception as e: print(f"{e} occured for {rname}, {variable}") print( f"Estimated effect of SEZ is {np.round(sc.pl_res_post.effect_vec.effect[-1])}, \ with a p-value of {np.round(sc.pl_res_post.effect_vec.p[-1],2)}" ) for synth_group in result.keys(): for rname in result[synth_group].keys(): for variable in result[synth_group][rname].keys(): result[synth_group][rname][variable]["synth"] = { "weights": result[synth_group][rname][variable]["synth"].get_weights( True ), "v_matrix": result[synth_group][rname][variable]["synth"].V, "v_pen": result[synth_group][rname][variable]["synth"].fitted_v_pen, "w_pen": result[synth_group][rname][variable]["synth"].fitted_w_pen, } result[synth_group][rname][variable]["res_df"] = result[synth_group][rname][ variable ]["res_df"].to_dict() import json class NumpyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) with open("result_in.json", "w") as outfile: json.dump(result, outfile, indent=4, cls=NumpyEncoder)
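A hedged sketch of how the post/pre MAE ratios collected above could be turned into a placebo-based significance check, which is the usual inference step in synthetic-control studies. It reuses the post_pre_relations dictionary and assumes, as in the plotting code, that the last four entries of each list are the placebo units; the helper name and the commented example call are illustrative, not part of the original analysis.

def placebo_p_value(variable, treated_region, relations=post_pre_relations, n_placebo=4):
    # Share of placebo post/pre MAE ratios at least as large as the treated region's ratio
    names = relations[variable]["x"]
    ratios = relations[variable]["height"]
    treated_ratio = ratios[names.index(treated_region)]
    placebo_ratios = ratios[-n_placebo:]  # placebo regions were appended after the treated ones
    return sum(r >= treated_ratio for r in placebo_ratios) / len(placebo_ratios)

# Example call for GRP on the first treated region:
# print(placebo_p_value("grp", list(synths_for_each_region.keys())[0]))

With only four placebo regions the resulting p-value is very coarse (multiples of 0.25), so it is only indicative; drawing more placebo regions would sharpen it.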
false
0
5,960
0
5,960
5,960
129561202
<jupyter_start><jupyter_text>stenosD Kaggle dataset identifier: stenosd <jupyter_script>import os import glob import xml.etree.ElementTree as ET import numpy as np import tensorflow as tf import cv2 import matplotlib.pyplot as plt def read_xml_annotation(xml_file): tree = ET.parse(xml_file) root = tree.getroot() boxes = [] for obj in root.iter("object"): xmin = int(obj.find("bndbox/xmin").text) xmax = int(obj.find("bndbox/xmax").text) ymin = int(obj.find("bndbox/ymin").text) ymax = int(obj.find("bndbox/ymax").text) boxes.append([xmin, xmax, ymin, ymax]) return np.array(boxes) def load_image_and_annotation(image_path, annotation_dir): img = cv2.imread(image_path) annotation_file = os.path.join( annotation_dir, os.path.splitext(os.path.basename(image_path))[0] + ".xml" ) boxes = read_xml_annotation(annotation_file) return img, boxes def preprocess_image(image): image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = image / 255.0 return image image_dir = "/kaggle/input/stenosd/dataset" annotation_dir = "/kaggle/input/stenosd/dataset" batch_size = 32 num_epochs = 10 learning_rate = 0.001 def generate_training_data(image_dir, annotation_dir): image_paths = glob.glob(os.path.join(image_dir, "*.bmp")) for i in range(0, len(image_paths), batch_size): images = [] boxes_list = [] for image_path in image_paths[i : i + batch_size]: img, boxes = load_image_and_annotation(image_path, annotation_dir) images.append(preprocess_image(img)) boxes_list.append(boxes) yield np.array(images), np.array(boxes_list) from object_detection.utils import visualization_utils as vis_util from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as vis_util from object_detection.utils import label_map_util def create_model(num_classes): detection_model = tf.saved_model.load("path_to_pretrained_model") return detection_model def detect_objects(image, model): input_tensor = tf.convert_to_tensor(image) input_tensor = input_tensor[tf.newaxis, ...] 
detections = model(input_tensor) num_detections = int(detections.pop("num_detections")) detections = { key: value[0, :num_detections].numpy() for key, value in detections.items() } detections["num_detections"] = num_detections return detections def visualize_detections(image, detections, label_map_path): category_index = label_map_util.create_category_index_from_labelmap( label_map_path, use_display_name=True ) vis_util.visualize_boxes_and_labels_on_image_array def visualize_detections(image, detections, label_map_path): category_index = label_map_util.create_category_index_from_labelmap( label_map_path, use_display_name=True ) vis_util.visualize_boxes_and_labels_on_image_array( image, detections["detection_boxes"], detections["detection_classes"].astype(np.int64), detections["detection_scores"], category_index, use_normalized_coordinates=True, line_thickness=8, min_score_thresh=0.5, ) plt.figure(figsize=(12, 8)) plt.imshow(image) plt.axis("off") plt.show() import tensorflow as tf def create_object_detection_model(): inputs = tf.keras.Input(shape=(None, None, 3)) # Variable input size backbone = tf.keras.applications.ResNet50( include_top=False, weights="imagenet" ) # Pretrained backbone backbone.trainable = True # Feature extraction backbone_outputs = backbone(inputs, training=True) # Additional convolutional layers for object detection x = layers.Conv2D(256, (3, 3), activation="relu")(backbone_outputs) x = layers.Conv2D(256, (3, 3), activation="relu")(x) x = layers.Conv2D(256, (3, 3), activation="relu")(x) # Localization head localization = layers.Conv2D(4, (3, 3), activation="linear", name="localization")(x) # Classification head classification = layers.Conv2D( 1, (3, 3), activation="sigmoid", name="classification" )(x) # Define the model model = tf.keras.Model(inputs=inputs, outputs=[localization, classification]) return model from tensorflow.keras import layers model = create_object_detection_model() # Define the loss functions localization_loss = tf.keras.losses.MeanSquaredError() classification_loss = tf.keras.losses.BinaryCrossentropy() # Define the metrics metrics = [tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.MeanIoU(num_classes=2)] # Define the optimizer optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) # Compile the model model.compile( optimizer=optimizer, loss=[localization_loss, classification_loss], metrics=metrics ) import os import glob import random from sklearn.model_selection import train_test_split # Set the directory where your dataset is located dataset_dir = "/kaggle/input/stenosd/dataset" # Get the paths of all image files in the dataset directory image_paths = glob.glob(os.path.join(dataset_dir, "*.bmp")) # Split the dataset into training and validation sets train_image_paths, val_image_paths = train_test_split( image_paths, test_size=0.2, random_state=42 ) # Define a function to load an image and its corresponding annotation def load_image_and_annotation(image_path): annotation_path = image_path.replace(".bmp", ".xml") # Implement the logic to load the image and its annotation # Return the image and annotation data # Load training images and annotations train_images = [] train_labels = [] for image_path in train_image_paths: img, boxes = load_image_and_annotation(image_path) train_images.append(preprocess_image(img)) train_labels.append(boxes) # Load validation images and annotations val_images = [] val_labels = [] for image_path in val_image_paths: img, boxes = load_image_and_annotation(image_path) val_images.append(preprocess_image(img)) 
val_labels.append(boxes) # Convert the lists to NumPy arrays train_images = np.array(train_images) train_labels = np.array(train_labels) val_images = np.array(val_images) val_labels = np.array(val_labels) # Train the model and save the training history history = model.fit( train_images, train_labels, epochs=10, validation_data=(val_images, val_labels) ) # Save the model weights model.save_weights("object_detection_model_weights.h5") # Save the training history to a file with open("training_history.txt", "w") as file: file.write(str(history.history)) import tensorflow as tf from sklearn.model_selection import train_test_split # Train the model and save the training history history = model.fit( train_images, train_labels, epochs=10, validation_data=(val_images, val_labels) ) # Save the model weights model.save_weights("object_detection_model_weights.h5") # Save the training history to a file with open("training_history.txt", "w") as file: file.write(str(history.history))
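The second load_image_and_annotation defined above (the single-argument version) is left as a stub, yet it is called when building the training and validation lists. A minimal sketch of how it could be completed, reusing the read_xml_annotation helper and the OpenCV loading already present in this notebook, is shown below; this is an assumption about the intended behaviour, not the author's implementation.

def load_image_and_annotation(image_path):
    # Load the .bmp image and the bounding boxes from the .xml file stored next to it
    annotation_path = image_path.replace(".bmp", ".xml")
    img = cv2.imread(image_path)
    boxes = read_xml_annotation(annotation_path)  # helper defined earlier in this notebook
    return img, boxes

Even with the stub filled in, the per-image box arrays have variable length, so they would still need to be padded or encoded into fixed-size localization/classification targets before model.fit could consume them as labels.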
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/561/129561202.ipynb
stenosd
aimanghrab
[{"Id": 129561202, "ScriptId": 38509747, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14561628, "CreationDate": "05/14/2023 21:04:14", "VersionNumber": 1.0, "Title": "notebookb9ca368178", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 238.0, "LinesInsertedFromPrevious": 238.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185745024, "KernelVersionId": 129561202, "SourceDatasetVersionId": 5682369}]
[{"Id": 5682369, "DatasetId": 3266733, "DatasourceVersionId": 5757933, "CreatorUserId": 14561628, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:39:49", "VersionNumber": 1.0, "Title": "stenosD", "Slug": "stenosd", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3266733, "CreatorUserId": 14561628, "OwnerUserId": 14561628.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682369.0, "CurrentDatasourceVersionId": 5757933.0, "ForumId": 3332349, "Type": 2, "CreationDate": "05/14/2023 11:39:49", "LastActivityDate": "05/14/2023", "TotalViews": 48, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 0}]
[{"Id": 14561628, "UserName": "aimanghrab", "DisplayName": "aiman ghrab", "RegisterDate": "04/09/2023", "PerformanceTier": 0}]
import os import glob import xml.etree.ElementTree as ET import numpy as np import tensorflow as tf import cv2 import matplotlib.pyplot as plt def read_xml_annotation(xml_file): tree = ET.parse(xml_file) root = tree.getroot() boxes = [] for obj in root.iter("object"): xmin = int(obj.find("bndbox/xmin").text) xmax = int(obj.find("bndbox/xmax").text) ymin = int(obj.find("bndbox/ymin").text) ymax = int(obj.find("bndbox/ymax").text) boxes.append([xmin, xmax, ymin, ymax]) return np.array(boxes) def load_image_and_annotation(image_path, annotation_dir): img = cv2.imread(image_path) annotation_file = os.path.join( annotation_dir, os.path.splitext(os.path.basename(image_path))[0] + ".xml" ) boxes = read_xml_annotation(annotation_file) return img, boxes def preprocess_image(image): image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = image / 255.0 return image image_dir = "/kaggle/input/stenosd/dataset" annotation_dir = "/kaggle/input/stenosd/dataset" batch_size = 32 num_epochs = 10 learning_rate = 0.001 def generate_training_data(image_dir, annotation_dir): image_paths = glob.glob(os.path.join(image_dir, "*.bmp")) for i in range(0, len(image_paths), batch_size): images = [] boxes_list = [] for image_path in image_paths[i : i + batch_size]: img, boxes = load_image_and_annotation(image_path, annotation_dir) images.append(preprocess_image(img)) boxes_list.append(boxes) yield np.array(images), np.array(boxes_list) from object_detection.utils import visualization_utils as vis_util from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as vis_util from object_detection.utils import label_map_util def create_model(num_classes): detection_model = tf.saved_model.load("path_to_pretrained_model") return detection_model def detect_objects(image, model): input_tensor = tf.convert_to_tensor(image) input_tensor = input_tensor[tf.newaxis, ...] 
detections = model(input_tensor) num_detections = int(detections.pop("num_detections")) detections = { key: value[0, :num_detections].numpy() for key, value in detections.items() } detections["num_detections"] = num_detections return detections def visualize_detections(image, detections, label_map_path): category_index = label_map_util.create_category_index_from_labelmap( label_map_path, use_display_name=True ) vis_util.visualize_boxes_and_labels_on_image_array def visualize_detections(image, detections, label_map_path): category_index = label_map_util.create_category_index_from_labelmap( label_map_path, use_display_name=True ) vis_util.visualize_boxes_and_labels_on_image_array( image, detections["detection_boxes"], detections["detection_classes"].astype(np.int64), detections["detection_scores"], category_index, use_normalized_coordinates=True, line_thickness=8, min_score_thresh=0.5, ) plt.figure(figsize=(12, 8)) plt.imshow(image) plt.axis("off") plt.show() import tensorflow as tf def create_object_detection_model(): inputs = tf.keras.Input(shape=(None, None, 3)) # Variable input size backbone = tf.keras.applications.ResNet50( include_top=False, weights="imagenet" ) # Pretrained backbone backbone.trainable = True # Feature extraction backbone_outputs = backbone(inputs, training=True) # Additional convolutional layers for object detection x = layers.Conv2D(256, (3, 3), activation="relu")(backbone_outputs) x = layers.Conv2D(256, (3, 3), activation="relu")(x) x = layers.Conv2D(256, (3, 3), activation="relu")(x) # Localization head localization = layers.Conv2D(4, (3, 3), activation="linear", name="localization")(x) # Classification head classification = layers.Conv2D( 1, (3, 3), activation="sigmoid", name="classification" )(x) # Define the model model = tf.keras.Model(inputs=inputs, outputs=[localization, classification]) return model from tensorflow.keras import layers model = create_object_detection_model() # Define the loss functions localization_loss = tf.keras.losses.MeanSquaredError() classification_loss = tf.keras.losses.BinaryCrossentropy() # Define the metrics metrics = [tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.MeanIoU(num_classes=2)] # Define the optimizer optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) # Compile the model model.compile( optimizer=optimizer, loss=[localization_loss, classification_loss], metrics=metrics ) import os import glob import random from sklearn.model_selection import train_test_split # Set the directory where your dataset is located dataset_dir = "/kaggle/input/stenosd/dataset" # Get the paths of all image files in the dataset directory image_paths = glob.glob(os.path.join(dataset_dir, "*.bmp")) # Split the dataset into training and validation sets train_image_paths, val_image_paths = train_test_split( image_paths, test_size=0.2, random_state=42 ) # Define a function to load an image and its corresponding annotation def load_image_and_annotation(image_path): annotation_path = image_path.replace(".bmp", ".xml") # Implement the logic to load the image and its annotation # Return the image and annotation data # Load training images and annotations train_images = [] train_labels = [] for image_path in train_image_paths: img, boxes = load_image_and_annotation(image_path) train_images.append(preprocess_image(img)) train_labels.append(boxes) # Load validation images and annotations val_images = [] val_labels = [] for image_path in val_image_paths: img, boxes = load_image_and_annotation(image_path) val_images.append(preprocess_image(img)) 
val_labels.append(boxes) # Convert the lists to NumPy arrays train_images = np.array(train_images) train_labels = np.array(train_labels) val_images = np.array(val_images) val_labels = np.array(val_labels) # Train the model and save the training history history = model.fit( train_images, train_labels, epochs=10, validation_data=(val_images, val_labels) ) # Save the model weights model.save_weights("object_detection_model_weights.h5") # Save the training history to a file with open("training_history.txt", "w") as file: file.write(str(history.history)) import tensorflow as tf from sklearn.model_selection import train_test_split # Train the model and save the training history history = model.fit( train_images, train_labels, epochs=10, validation_data=(val_images, val_labels) ) # Save the model weights model.save_weights("object_detection_model_weights.h5") # Save the training history to a file with open("training_history.txt", "w") as file: file.write(str(history.history))
false
0
2,028
0
2,048
2,028
129516668
<jupyter_start><jupyter_text>Bank Customer Segmentation (1M+ Transactions) ### Bank Customer Segmentation Most banks have a large customer base - with different characteristics in terms of age, income, values, lifestyle, and more. Customer segmentation is the process of dividing a customer dataset into specific groups based on shared traits. *According to a report from Ernst & Young, “A more granular understanding of consumers is no longer a nice-to-have item, but a strategic and competitive imperative for banking providers. Customer understanding should be a living, breathing part of everyday business, with insights underpinning the full range of banking operations.* ### About this Dataset This dataset consists of 1 Million+ transaction by over 800K customers for a bank in India. The data contains information such as - customer age (DOB), location, gender, account balance at the time of the transaction, transaction details, transaction amount, etc. ### Interesting Analysis Ideas The dataset can be used for different analysis, example - 1. Perform Clustering / Segmentation on the dataset and identify popular customer groups along with their definitions/rules 2. Perform Location-wise analysis to identify regional trends in India 3. Perform transaction-related analysis to identify interesting trends that can be used by a bank to improve / optimi their user experiences 4. Customer Recency, Frequency, Monetary analysis 5. Network analysis or Graph analysis of customer data. Kaggle dataset identifier: bank-customer-segmentation <jupyter_code>import pandas as pd df = pd.read_csv('bank-customer-segmentation/bank_transactions.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 1048567 entries, 0 to 1048566 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 TransactionID 1048567 non-null object 1 CustomerID 1048567 non-null object 2 CustomerDOB 1045170 non-null object 3 CustGender 1047467 non-null object 4 CustLocation 1048416 non-null object 5 CustAccountBalance 1046198 non-null float64 6 TransactionDate 1048567 non-null object 7 TransactionTime 1048567 non-null int64 8 TransactionAmount (INR) 1048567 non-null float64 dtypes: float64(2), int64(1), object(6) memory usage: 72.0+ MB <jupyter_text>Examples: { "TransactionID": "T1", "CustomerID": "C5841053", "CustomerDOB": "10/1/94", "CustGender": "F", "CustLocation": "JAMSHEDPUR", "CustAccountBalance": 17819.05, "TransactionDate": "2/8/16", "TransactionTime": 143207, "TransactionAmount (INR)": 25 } { "TransactionID": "T2", "CustomerID": "C2142763", "CustomerDOB": "4/4/57", "CustGender": "M", "CustLocation": "JHAJJAR", "CustAccountBalance": 2270.69, "TransactionDate": "2/8/16", "TransactionTime": 141858, "TransactionAmount (INR)": 27999 } { "TransactionID": "T3", "CustomerID": "C4417068", "CustomerDOB": "26/11/96", "CustGender": "F", "CustLocation": "MUMBAI", "CustAccountBalance": 17874.44, "TransactionDate": "2/8/16", "TransactionTime": 142712, "TransactionAmount (INR)": 459 } { "TransactionID": "T4", "CustomerID": "C5342380", "CustomerDOB": "14/9/73", "CustGender": "F", "CustLocation": "MUMBAI", "CustAccountBalance": 866503.21, "TransactionDate": "2/8/16", "TransactionTime": 142714, "TransactionAmount (INR)": 2060 } <jupyter_script># # Customer Segmentation on Bank Customers # ### The project will look into the demographic attributes of the bank's customers and conduct segmentation based on their lifecycle and value to the bank. The methods used in the project: # ### 1. 
RFM Model: Recency, Frequency, Monetary Scores # ### 2. Customer Lifecycle: New Customer, Active Customer, Non-active Customer, Returning Customer # ### 3. Pareto Analysis: how many customers contribute to the most transaction volume import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime # Ban the scientific expression pd.set_option("display.float_format", lambda x: "%.2f" % x) # Ignore warnings import warnings warnings.filterwarnings("ignore") # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ### 1. Data Cleaning # Import the data data = pd.read_csv("/kaggle/input/bank-customer-segmentation/bank_transactions.csv") data.info() data.sample(5) # 1. Check for missing values data.isnull().sum() / data.shape[0] * 100 # From the results, the missing values only take less than 1% of the total records. Therefore we can drop them data.dropna(axis=0, inplace=True) # 2. Check for duplications: the customer could be duplicated given one can make more than 1 transactions. The transaction ID should be uniqie data.duplicated(subset="TransactionID").sum() # There is no duplication in the transaction id part. # 3. Check the distribution of the numeric fields (for potential outliers) data["TransactionDate"] = pd.to_datetime(data["TransactionDate"]) data["CustomerDOB"] = pd.to_datetime(data["CustomerDOB"]) data[ [ "CustAccountBalance", "TransactionAmount (INR)", "CustomerDOB", "CustGender", "CustLocation", "TransactionDate", "TransactionTime", ] ].describe(percentiles=[0.01, 0.25, 0.50, 0.75, 0.99], include="all").T # From the results, the numerical variables (balance and transaction amt) seems to follow a right skewed distribution (mean > median), which shouold be due to high net wealth customers # For date and categorical variables, there are some dates in wrong values (e.g. 1800-01-01) will need to be adjusted. # The transaction time could be dropped given it seems not containing useful information in the analysis # 4. Data Transformation # 4.1 Drop unused fields data.drop("TransactionTime", axis=1, inplace=True) # 4.2 Calculate Customer Age # Here will use the year in the data (2016) as base to get the customer's age data["age"] = data["TransactionDate"].dt.year - data["CustomerDOB"].dt.year # 4.3 Change all the age below 12 and above 100 percentile into median age data.loc[(data["age"] < 12) | (data["age"] >= 100), "age"] = data["age"].median() # 4.4 Adjust the values of Gender data["CustGender"] = data["CustGender"].replace( {"M": "Male", "F": "Female", "T": "Male"} ) # ### 2. 
Exploratory Data Analysis (EDA) # #### 2.1 Gender # Compare the distribution of customers across genders plt.style.use("ggplot") fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20, 10)) ax[0].pie( data["CustGender"].value_counts(), autopct="%1.f%%", labels=["Male", "Female"] ) ax[0].set_title("Customer Gender Frequency", size=20) ax[1] = sns.distplot( data.loc[ (data["CustGender"] == "Male") & ( data["TransactionAmount (INR)"] < np.percentile(data["TransactionAmount (INR)"], 90) ), "TransactionAmount (INR)", ], label=True, kde=False, ) ax[1] = sns.distplot( data.loc[ (data["CustGender"] == "Female") & ( data["TransactionAmount (INR)"] < np.percentile(data["TransactionAmount (INR)"], 90) ), "TransactionAmount (INR)", ], label="Female", kde=False, ) ax[1].set_title("Transaction Amount by Customer Gender", size=20) # #### 2.2 Location # Select the top 20 cities with most transactions happened plt.figure(figsize=(20, 6)) sns.countplot( y="CustLocation", data=data, order=data["CustLocation"].value_counts()[:20].index ) plt.title("Top 20 Locations of Customer ", fontsize="20") # From the plot, it seems Mumbai, New Delhi and Bangalore are the TOP 3 cities that transaction happened. This could be due to that the 3 cities have more population, better economy condition and higher salary range. # #### 2.3 Age # Distribution of age based on bins bins = [0, 20, 30, 40, 50, 60, 100] labels = ["0 - 20", "20 - 30", "30 - 40", "40 - 50", "50 - 60", "60+"] data["age_bin"] = pd.cut(x=data["age"], bins=bins, labels=labels, right=True) plt.figure(figsize=(20, 6)) sns.countplot(data, y="age_bin", order=data["age_bin"].value_counts().index) # From the plot, 20 - 30 group is the the majority, followed by 30 - 40 customers. There is much less transaction by customers after 50. # ### 3. Customer Segmentation # #### 3.1 RFM Segmentation # #### RFM model is commonly used in marketing to segment customers based on their shopping behaviours, then treat each segment with targeted strategies. The three metrics used in the segmentation are: # #### 1) Recency: how many days since the the customer's last transaction date? The lower the value, the more loyal of the customer to our firm; # #### 2) Frequency: how many time the customer make transactions during the period? The higher the value, the more active of the customer to our products and services; # #### 3) Monetary: The total amount of transactions or money spent by the customer during the period. This is the most important metric in the model. The higher the value, the more monetary value the customer could bring to our firm. # #### Steps of RFM: # #### Step 1: Calculate the raw value of each metrics; # #### Step 2: Assign mark to each raw value based on their distributions # #### Step 3: Based on the average mark of each metric, decide the class of each customer record (0 or 1, 1 means qualified, 0 means unqualified) # #### Step 4: segment the customers based in their assigned class (0 or 1) if each metric # #### The time range selected should be decided by the business team in real place, here just using the whole timeframe in the data. Here we do not take the balance into consideration, given the real transaction would create more values to the business. # RFM Modeling Process: # 1. 
Step 1 - Calculate the raw value of each metric: data_RFM = data.copy() data_RFM = ( data_RFM.groupby(["CustomerID"]) .agg( Last_Trans_Date=("TransactionDate", "max"), M=("TransactionAmount (INR)", "sum"), F=("TransactionID", "count"), ) .reset_index() ) # Recency (R) Calculation: use the last transaction date in the data as the base, then calculate the date difference between each customer's last transaction date and the base data_RFM["R"] = data_RFM["Last_Trans_Date"].apply( lambda x: data_RFM["Last_Trans_Date"].max() - x ) data_RFM["R"] = data_RFM["R"].dt.days data_RFM = data_RFM[["CustomerID", "R", "F", "M"]] data_RFM.head() # Step 2 - Assign a mark to each raw value based on its distribution # There is no strict standard for the choice of marking bins or mark range; it depends on the business case and the marketing requirements # Here we take a look at the distribution of each metric plt.style.use("ggplot") fig, ax = plt.subplots(ncols=1, nrows=3, figsize=(6, 20)) sns.histplot(data=data_RFM["R"], kde=False, ax=ax[0]) sns.histplot(data=data_RFM["F"], kde=False, ax=ax[1]) sns.histplot(data=data_RFM["M"], kde=False, ax=ax[2]) data_RFM.head()
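The notebook stops after Step 1 and the distribution plots, so a sketch of how Steps 2-4 described above could be implemented is given below. The quartile scoring, the above/below-average classes, and the segment names are placeholder choices for illustration, not the author's final marking scheme.

data_rfm_scored = data_RFM.copy()

# Step 2: score each metric into quartiles (R is reversed: fewer days since the last transaction is better)
# rank(method="first") avoids pd.qcut failing on heavily tied values such as F == 1
data_rfm_scored["R_score"] = pd.qcut(data_rfm_scored["R"].rank(method="first"), 4, labels=[4, 3, 2, 1]).astype(int)
data_rfm_scored["F_score"] = pd.qcut(data_rfm_scored["F"].rank(method="first"), 4, labels=[1, 2, 3, 4]).astype(int)
data_rfm_scored["M_score"] = pd.qcut(data_rfm_scored["M"].rank(method="first"), 4, labels=[1, 2, 3, 4]).astype(int)

# Step 3: compare each score with its column mean -> class 1 (above average) or class 0 (otherwise)
for col in ["R_score", "F_score", "M_score"]:
    data_rfm_scored[col.replace("_score", "_class")] = (
        data_rfm_scored[col] > data_rfm_scored[col].mean()
    ).astype(int)

# Step 4: map the (R, F, M) class combination to a named segment
segment_map = {
    (1, 1, 1): "Important value customer", (0, 1, 1): "Important retention customer",
    (1, 0, 1): "Important development customer", (0, 0, 1): "Important win-back customer",
    (1, 1, 0): "General value customer", (0, 1, 0): "General retention customer",
    (1, 0, 0): "General development customer", (0, 0, 0): "General win-back customer",
}
data_rfm_scored["segment"] = data_rfm_scored.apply(
    lambda row: segment_map[(row["R_class"], row["F_class"], row["M_class"])], axis=1
)
data_rfm_scored["segment"].value_counts()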
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/516/129516668.ipynb
bank-customer-segmentation
shivamb
[{"Id": 129516668, "ScriptId": 38489547, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1270512, "CreationDate": "05/14/2023 13:17:01", "VersionNumber": 2.0, "Title": "Customer Segmentation - RFM Model Practise", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 171.0, "LinesInsertedFromPrevious": 42.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 129.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185653298, "KernelVersionId": 129516668, "SourceDatasetVersionId": 2743905}]
[{"Id": 2743905, "DatasetId": 1672910, "DatasourceVersionId": 2789165, "CreatorUserId": 1571785, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "10/26/2021 13:28:18", "VersionNumber": 1.0, "Title": "Bank Customer Segmentation (1M+ Transactions)", "Slug": "bank-customer-segmentation", "Subtitle": "Customer demographics and transactions data from an Indian Bank", "Description": "### Bank Customer Segmentation\n\nMost banks have a large customer base - with different characteristics in terms of age, income, values, lifestyle, and more. Customer segmentation is the process of dividing a customer dataset into specific groups based on shared traits.\n\n*According to a report from Ernst & Young, \u201cA more granular understanding of consumers is no longer a nice-to-have item, but a strategic and competitive imperative for banking providers. Customer understanding should be a living, breathing part of everyday business, with insights underpinning the full range of banking operations.*\n\n### About this Dataset\n\nThis dataset consists of 1 Million+ transaction by over 800K customers for a bank in India. The data contains information such as - customer age (DOB), location, gender, account balance at the time of the transaction, transaction details, transaction amount, etc. \n\n### Interesting Analysis Ideas \n\nThe dataset can be used for different analysis, example - \n\n1. Perform Clustering / Segmentation on the dataset and identify popular customer groups along with their definitions/rules \n2. Perform Location-wise analysis to identify regional trends in India \n3. Perform transaction-related analysis to identify interesting trends that can be used by a bank to improve / optimi their user experiences \n4. Customer Recency, Frequency, Monetary analysis \n5. Network analysis or Graph analysis of customer data.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1672910, "CreatorUserId": 1571785, "OwnerUserId": 1571785.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2743905.0, "CurrentDatasourceVersionId": 2789165.0, "ForumId": 1694135, "Type": 2, "CreationDate": "10/26/2021 13:28:18", "LastActivityDate": "10/26/2021", "TotalViews": 74434, "TotalDownloads": 6281, "TotalVotes": 86, "TotalKernels": 23}]
[{"Id": 1571785, "UserName": "shivamb", "DisplayName": "Shivam Bansal", "RegisterDate": "01/22/2018", "PerformanceTier": 4}]
# # Customer Segmentation on Bank Customers # ### The project will look into the demographic attributes of the bank's customers and conduct segmentation based on their lifecycle and value to the bank. The methods used in the project: # ### 1. RFM Model: Recency, Frequency, Monetary Scores # ### 2. Customer Lifecycle: New Customer, Active Customer, Non-active Customer, Returning Customer # ### 3. Pareto Analysis: how many customers contribute to the most transaction volume import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime # Ban the scientific expression pd.set_option("display.float_format", lambda x: "%.2f" % x) # Ignore warnings import warnings warnings.filterwarnings("ignore") # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ### 1. Data Cleaning # Import the data data = pd.read_csv("/kaggle/input/bank-customer-segmentation/bank_transactions.csv") data.info() data.sample(5) # 1. Check for missing values data.isnull().sum() / data.shape[0] * 100 # From the results, the missing values only take less than 1% of the total records. Therefore we can drop them data.dropna(axis=0, inplace=True) # 2. Check for duplications: the customer could be duplicated given one can make more than 1 transactions. The transaction ID should be uniqie data.duplicated(subset="TransactionID").sum() # There is no duplication in the transaction id part. # 3. Check the distribution of the numeric fields (for potential outliers) data["TransactionDate"] = pd.to_datetime(data["TransactionDate"]) data["CustomerDOB"] = pd.to_datetime(data["CustomerDOB"]) data[ [ "CustAccountBalance", "TransactionAmount (INR)", "CustomerDOB", "CustGender", "CustLocation", "TransactionDate", "TransactionTime", ] ].describe(percentiles=[0.01, 0.25, 0.50, 0.75, 0.99], include="all").T # From the results, the numerical variables (balance and transaction amt) seems to follow a right skewed distribution (mean > median), which shouold be due to high net wealth customers # For date and categorical variables, there are some dates in wrong values (e.g. 1800-01-01) will need to be adjusted. # The transaction time could be dropped given it seems not containing useful information in the analysis # 4. Data Transformation # 4.1 Drop unused fields data.drop("TransactionTime", axis=1, inplace=True) # 4.2 Calculate Customer Age # Here will use the year in the data (2016) as base to get the customer's age data["age"] = data["TransactionDate"].dt.year - data["CustomerDOB"].dt.year # 4.3 Change all the age below 12 and above 100 percentile into median age data.loc[(data["age"] < 12) | (data["age"] >= 100), "age"] = data["age"].median() # 4.4 Adjust the values of Gender data["CustGender"] = data["CustGender"].replace( {"M": "Male", "F": "Female", "T": "Male"} ) # ### 2. 
Exploratory Data Analysis (EDA) # #### 2.1 Gender # Compare the distribution of customers across genders plt.style.use("ggplot") fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20, 10)) ax[0].pie( data["CustGender"].value_counts(), autopct="%1.f%%", labels=["Male", "Female"] ) ax[0].set_title("Customer Gender Frequency", size=20) ax[1] = sns.distplot( data.loc[ (data["CustGender"] == "Male") & ( data["TransactionAmount (INR)"] < np.percentile(data["TransactionAmount (INR)"], 90) ), "TransactionAmount (INR)", ], label=True, kde=False, ) ax[1] = sns.distplot( data.loc[ (data["CustGender"] == "Female") & ( data["TransactionAmount (INR)"] < np.percentile(data["TransactionAmount (INR)"], 90) ), "TransactionAmount (INR)", ], label="Female", kde=False, ) ax[1].set_title("Transaction Amount by Customer Gender", size=20) # #### 2.2 Location # Select the top 20 cities with most transactions happened plt.figure(figsize=(20, 6)) sns.countplot( y="CustLocation", data=data, order=data["CustLocation"].value_counts()[:20].index ) plt.title("Top 20 Locations of Customer ", fontsize="20") # From the plot, it seems Mumbai, New Delhi and Bangalore are the TOP 3 cities that transaction happened. This could be due to that the 3 cities have more population, better economy condition and higher salary range. # #### 2.3 Age # Distribution of age based on bins bins = [0, 20, 30, 40, 50, 60, 100] labels = ["0 - 20", "20 - 30", "30 - 40", "40 - 50", "50 - 60", "60+"] data["age_bin"] = pd.cut(x=data["age"], bins=bins, labels=labels, right=True) plt.figure(figsize=(20, 6)) sns.countplot(data, y="age_bin", order=data["age_bin"].value_counts().index) # From the plot, 20 - 30 group is the the majority, followed by 30 - 40 customers. There is much less transaction by customers after 50. # ### 3. Customer Segmentation # #### 3.1 RFM Segmentation # #### RFM model is commonly used in marketing to segment customers based on their shopping behaviours, then treat each segment with targeted strategies. The three metrics used in the segmentation are: # #### 1) Recency: how many days since the the customer's last transaction date? The lower the value, the more loyal of the customer to our firm; # #### 2) Frequency: how many time the customer make transactions during the period? The higher the value, the more active of the customer to our products and services; # #### 3) Monetary: The total amount of transactions or money spent by the customer during the period. This is the most important metric in the model. The higher the value, the more monetary value the customer could bring to our firm. # #### Steps of RFM: # #### Step 1: Calculate the raw value of each metrics; # #### Step 2: Assign mark to each raw value based on their distributions # #### Step 3: Based on the average mark of each metric, decide the class of each customer record (0 or 1, 1 means qualified, 0 means unqualified) # #### Step 4: segment the customers based in their assigned class (0 or 1) if each metric # #### The time range selected should be decided by the business team in real place, here just using the whole timeframe in the data. Here we do not take the balance into consideration, given the real transaction would create more values to the business. # RFM Modeling Process: # 1. 
Step 1 - Calculate the raw value of each metric: data_RFM = data.copy() data_RFM = ( data_RFM.groupby(["CustomerID"]) .agg( Last_Trans_Date=("TransactionDate", "max"), M=("TransactionAmount (INR)", "sum"), F=("TransactionID", "count"), ) .reset_index() ) # Recency (R) Calculation: use the last transaction date in the data as the base, then calculate the date difference between each customer's last transaction date and the base data_RFM["R"] = data_RFM["Last_Trans_Date"].apply( lambda x: data_RFM["Last_Trans_Date"].max() - x ) data_RFM["R"] = data_RFM["R"].dt.days data_RFM = data_RFM[["CustomerID", "R", "F", "M"]] data_RFM.head() # Step 2 - Assign a mark to each raw value based on its distribution # There is no strict standard for the choice of marking bins or mark range; it depends on the business case and the marketing requirements # Here we take a look at the distribution of each metric plt.style.use("ggplot") fig, ax = plt.subplots(ncols=1, nrows=3, figsize=(6, 20)) sns.histplot(data=data_RFM["R"], kde=False, ax=ax[0]) sns.histplot(data=data_RFM["F"], kde=False, ax=ax[1]) sns.histplot(data=data_RFM["M"], kde=False, ax=ax[2]) data_RFM.head()
[{"bank-customer-segmentation/bank_transactions.csv": {"column_names": "[\"TransactionID\", \"CustomerID\", \"CustomerDOB\", \"CustGender\", \"CustLocation\", \"CustAccountBalance\", \"TransactionDate\", \"TransactionTime\", \"TransactionAmount (INR)\"]", "column_data_types": "{\"TransactionID\": \"object\", \"CustomerID\": \"object\", \"CustomerDOB\": \"object\", \"CustGender\": \"object\", \"CustLocation\": \"object\", \"CustAccountBalance\": \"float64\", \"TransactionDate\": \"object\", \"TransactionTime\": \"int64\", \"TransactionAmount (INR)\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1048567 entries, 0 to 1048566\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 TransactionID 1048567 non-null object \n 1 CustomerID 1048567 non-null object \n 2 CustomerDOB 1045170 non-null object \n 3 CustGender 1047467 non-null object \n 4 CustLocation 1048416 non-null object \n 5 CustAccountBalance 1046198 non-null float64\n 6 TransactionDate 1048567 non-null object \n 7 TransactionTime 1048567 non-null int64 \n 8 TransactionAmount (INR) 1048567 non-null float64\ndtypes: float64(2), int64(1), object(6)\nmemory usage: 72.0+ MB\n", "summary": "{\"CustAccountBalance\": {\"count\": 1046198.0, \"mean\": 115403.54005622261, \"std\": 846485.3806006602, \"min\": 0.0, \"25%\": 4721.76, \"50%\": 16792.18, \"75%\": 57657.36, \"max\": 115035495.1}, \"TransactionTime\": {\"count\": 1048567.0, \"mean\": 157087.52939297154, \"std\": 51261.85402232933, \"min\": 0.0, \"25%\": 124030.0, \"50%\": 164226.0, \"75%\": 200010.0, \"max\": 235959.0}, \"TransactionAmount (INR)\": {\"count\": 1048567.0, \"mean\": 1574.3350034570992, \"std\": 6574.742978454002, \"min\": 0.0, \"25%\": 161.0, \"50%\": 459.03, \"75%\": 1200.0, \"max\": 1560034.99}}", "examples": "{\"TransactionID\":{\"0\":\"T1\",\"1\":\"T2\",\"2\":\"T3\",\"3\":\"T4\"},\"CustomerID\":{\"0\":\"C5841053\",\"1\":\"C2142763\",\"2\":\"C4417068\",\"3\":\"C5342380\"},\"CustomerDOB\":{\"0\":\"10\\/1\\/94\",\"1\":\"4\\/4\\/57\",\"2\":\"26\\/11\\/96\",\"3\":\"14\\/9\\/73\"},\"CustGender\":{\"0\":\"F\",\"1\":\"M\",\"2\":\"F\",\"3\":\"F\"},\"CustLocation\":{\"0\":\"JAMSHEDPUR\",\"1\":\"JHAJJAR\",\"2\":\"MUMBAI\",\"3\":\"MUMBAI\"},\"CustAccountBalance\":{\"0\":17819.05,\"1\":2270.69,\"2\":17874.44,\"3\":866503.21},\"TransactionDate\":{\"0\":\"2\\/8\\/16\",\"1\":\"2\\/8\\/16\",\"2\":\"2\\/8\\/16\",\"3\":\"2\\/8\\/16\"},\"TransactionTime\":{\"0\":143207,\"1\":141858,\"2\":142712,\"3\":142714},\"TransactionAmount (INR)\":{\"0\":25.0,\"1\":27999.0,\"2\":459.0,\"3\":2060.0}}"}}]
true
1
<start_data_description><data_path>bank-customer-segmentation/bank_transactions.csv: <column_names> ['TransactionID', 'CustomerID', 'CustomerDOB', 'CustGender', 'CustLocation', 'CustAccountBalance', 'TransactionDate', 'TransactionTime', 'TransactionAmount (INR)'] <column_types> {'TransactionID': 'object', 'CustomerID': 'object', 'CustomerDOB': 'object', 'CustGender': 'object', 'CustLocation': 'object', 'CustAccountBalance': 'float64', 'TransactionDate': 'object', 'TransactionTime': 'int64', 'TransactionAmount (INR)': 'float64'} <dataframe_Summary> {'CustAccountBalance': {'count': 1046198.0, 'mean': 115403.54005622261, 'std': 846485.3806006602, 'min': 0.0, '25%': 4721.76, '50%': 16792.18, '75%': 57657.36, 'max': 115035495.1}, 'TransactionTime': {'count': 1048567.0, 'mean': 157087.52939297154, 'std': 51261.85402232933, 'min': 0.0, '25%': 124030.0, '50%': 164226.0, '75%': 200010.0, 'max': 235959.0}, 'TransactionAmount (INR)': {'count': 1048567.0, 'mean': 1574.3350034570992, 'std': 6574.742978454002, 'min': 0.0, '25%': 161.0, '50%': 459.03, '75%': 1200.0, 'max': 1560034.99}} <dataframe_info> RangeIndex: 1048567 entries, 0 to 1048566 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 TransactionID 1048567 non-null object 1 CustomerID 1048567 non-null object 2 CustomerDOB 1045170 non-null object 3 CustGender 1047467 non-null object 4 CustLocation 1048416 non-null object 5 CustAccountBalance 1046198 non-null float64 6 TransactionDate 1048567 non-null object 7 TransactionTime 1048567 non-null int64 8 TransactionAmount (INR) 1048567 non-null float64 dtypes: float64(2), int64(1), object(6) memory usage: 72.0+ MB <some_examples> {'TransactionID': {'0': 'T1', '1': 'T2', '2': 'T3', '3': 'T4'}, 'CustomerID': {'0': 'C5841053', '1': 'C2142763', '2': 'C4417068', '3': 'C5342380'}, 'CustomerDOB': {'0': '10/1/94', '1': '4/4/57', '2': '26/11/96', '3': '14/9/73'}, 'CustGender': {'0': 'F', '1': 'M', '2': 'F', '3': 'F'}, 'CustLocation': {'0': 'JAMSHEDPUR', '1': 'JHAJJAR', '2': 'MUMBAI', '3': 'MUMBAI'}, 'CustAccountBalance': {'0': 17819.05, '1': 2270.69, '2': 17874.44, '3': 866503.21}, 'TransactionDate': {'0': '2/8/16', '1': '2/8/16', '2': '2/8/16', '3': '2/8/16'}, 'TransactionTime': {'0': 143207, '1': 141858, '2': 142712, '3': 142714}, 'TransactionAmount (INR)': {'0': 25.0, '1': 27999.0, '2': 459.0, '3': 2060.0}} <end_description>
2,402
0
3,525
2,402
129516007
<jupyter_start><jupyter_text>Heights and weights ### Context This data set gives average masses for women as a function of their height in a sample of American women of age 30–39. ### Content The data contains the variables Height (m) Weight (kg) Kaggle dataset identifier: heights-and-weights <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/heights-and-weights/data.csv") df.head(5) # first five values # # description and the info of the data set. print("Dimensions of the dataset:", df.shape) print("\nColumn names and data types:") print(df.dtypes) print("\nSummary statistics:") print(df.describe()) print("\nMissing values:") print(df.isnull().sum()) df.info() # # Linear Regression work. # ## defineing x and y axises x_axis_val = df["Height"].values x_axis_val y_axis_val = df["Weight"].values y_axis_val from matplotlib import pyplot as plt # ## plot of the original full_data plt.scatter(x_axis_val, y_axis_val, color="black") plt.xlabel("Height") plt.ylabel("Weight") plt.plot x = x_axis_val.reshape(-1, 1) x, len(x) from sklearn.model_selection import train_test_split # ## define the axises for the linear regression x_train, x_test, y_train, y_test = train_test_split( x, y_axis_val, train_size=0.7, random_state=150 ) x_train, len(x_train) y_train, len(y_train) # ## plot of the training sample plt.scatter(x_train, y_train, color="black") plt.xlabel("Height_train_sample") plt.ylabel("Weight_train_sample") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) lr.score(x_test, y_test) * 100 y_predict = lr.predict(x_test) y_predict y_test # to compare the predict and test values # ## plot of the training sample(in black) with the predicted values for the test sample(in red) plt.scatter(x_train, y_train, color="black") plt.scatter(x_test, y_predict, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## three conclusions from my regression model. # ## 1. height vs weight with train size of 60% and random state of 150 x_train_1, x_test_1, y_train_1, y_test_1 = train_test_split( x, y_axis_val, train_size=0.6, random_state=150 ) lr_1 = LinearRegression() lr_1.fit(x_train_1, y_train_1) lr_1.score(x_test_1, y_test_1) * 100 y_predict_1 = lr.predict(x_test_1) plt.scatter(x_train_1, y_train_1, color="black") plt.plot(x_test_1, y_predict_1, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## 2. height vs weight with train size of 15% and random state of 150 x_train_2, x_test_2, y_train_2, y_test_2 = train_test_split( x, y_axis_val, train_size=0.15, random_state=150 ) lr_2 = LinearRegression() lr_2.fit(x_train_2, y_train_2) lr_2.score(x_test_2, y_test_2) * 100 y_predict_2 = lr.predict(x_test_2) plt.scatter(x_train_2, y_train_2, color="black") plt.plot(x_test_2, y_predict_2, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## 3. 
height vs weight with train size of 90% and random state of 150 x_train_3, x_test_3, y_train_3, y_test_3 = train_test_split( x, y_axis_val, train_size=0.9, random_state=150 ) lr_3 = LinearRegression() lr_3.fit(x_train_3, y_train_3) lr_3.score(x_test_3, y_test_3) * 100 y_predict_3 = lr_3.predict(x_test_3) plt.scatter(x_train_3, y_train_3, color="black") plt.plot(x_test_3, y_predict_3, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot
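Since the three "conclusion" cells above differ only in the train/test split, the comparison can be made explicit by looping over the split sizes and printing each test R^2. A small sketch, reusing x and y_axis_val and the same random_state as above:

from sklearn.linear_model import LinearRegression  # already imported above
from sklearn.model_selection import train_test_split  # already imported above

for size in (0.6, 0.15, 0.9):
    x_tr, x_te, y_tr, y_te = train_test_split(x, y_axis_val, train_size=size, random_state=150)
    model = LinearRegression().fit(x_tr, y_tr)
    print(f"train_size={size:.2f}  test R^2 = {model.score(x_te, y_te) * 100:.1f}%")

With a dataset this small, the test score swings noticeably with the split, which is the main caveat behind any of the three conclusions.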
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/516/129516007.ipynb
heights-and-weights
tmcketterick
[{"Id": 129516007, "ScriptId": 38504956, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14803463, "CreationDate": "05/14/2023 13:11:09", "VersionNumber": 1.0, "Title": "Linear Regression", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 150.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185651620, "KernelVersionId": 129516007, "SourceDatasetVersionId": 12327}]
[{"Id": 12327, "DatasetId": 8834, "DatasourceVersionId": 12327, "CreatorUserId": 930751, "LicenseName": "CC0: Public Domain", "CreationDate": "01/06/2018 21:42:20", "VersionNumber": 1.0, "Title": "Heights and weights", "Slug": "heights-and-weights", "Subtitle": "Simple linear regression", "Description": "### Context\n\nThis data set gives average masses for women as a function of their height in a sample of American women of age 30\u201339.\n\n\n### Content\n\nThe data contains the variables\n\nHeight (m) \nWeight (kg)\n\n\n### Acknowledgements\n\nhttps://en.wikipedia.org/wiki/Simple_linear_regression", "VersionNotes": "Initial release", "TotalCompressedBytes": 189.0, "TotalUncompressedBytes": 189.0}]
[{"Id": 8834, "CreatorUserId": 930751, "OwnerUserId": 930751.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 12327.0, "CurrentDatasourceVersionId": 12327.0, "ForumId": 16020, "Type": 2, "CreationDate": "01/06/2018 21:42:20", "LastActivityDate": "01/31/2018", "TotalViews": 47661, "TotalDownloads": 8113, "TotalVotes": 72, "TotalKernels": 87}]
[{"Id": 930751, "UserName": "tmcketterick", "DisplayName": "T McKetterick", "RegisterDate": "02/26/2017", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/heights-and-weights/data.csv") df.head(5) # first five values # # description and the info of the data set. print("Dimensions of the dataset:", df.shape) print("\nColumn names and data types:") print(df.dtypes) print("\nSummary statistics:") print(df.describe()) print("\nMissing values:") print(df.isnull().sum()) df.info() # # Linear Regression work. # ## defineing x and y axises x_axis_val = df["Height"].values x_axis_val y_axis_val = df["Weight"].values y_axis_val from matplotlib import pyplot as plt # ## plot of the original full_data plt.scatter(x_axis_val, y_axis_val, color="black") plt.xlabel("Height") plt.ylabel("Weight") plt.plot x = x_axis_val.reshape(-1, 1) x, len(x) from sklearn.model_selection import train_test_split # ## define the axises for the linear regression x_train, x_test, y_train, y_test = train_test_split( x, y_axis_val, train_size=0.7, random_state=150 ) x_train, len(x_train) y_train, len(y_train) # ## plot of the training sample plt.scatter(x_train, y_train, color="black") plt.xlabel("Height_train_sample") plt.ylabel("Weight_train_sample") plt.plot from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(x_train, y_train) lr.score(x_test, y_test) * 100 y_predict = lr.predict(x_test) y_predict y_test # to compare the predict and test values # ## plot of the training sample(in black) with the predicted values for the test sample(in red) plt.scatter(x_train, y_train, color="black") plt.scatter(x_test, y_predict, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## three conclusions from my regression model. # ## 1. height vs weight with train size of 60% and random state of 150 x_train_1, x_test_1, y_train_1, y_test_1 = train_test_split( x, y_axis_val, train_size=0.6, random_state=150 ) lr_1 = LinearRegression() lr_1.fit(x_train_1, y_train_1) lr_1.score(x_test_1, y_test_1) * 100 y_predict_1 = lr.predict(x_test_1) plt.scatter(x_train_1, y_train_1, color="black") plt.plot(x_test_1, y_predict_1, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## 2. height vs weight with train size of 15% and random state of 150 x_train_2, x_test_2, y_train_2, y_test_2 = train_test_split( x, y_axis_val, train_size=0.15, random_state=150 ) lr_2 = LinearRegression() lr_2.fit(x_train_2, y_train_2) lr_2.score(x_test_2, y_test_2) * 100 y_predict_2 = lr.predict(x_test_2) plt.scatter(x_train_2, y_train_2, color="black") plt.plot(x_test_2, y_predict_2, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot # ## 3. 
height vs weight with train size of 90% and random state of 150 x_train_3, x_test_3, y_train_3, y_test_3 = train_test_split( x, y_axis_val, train_size=0.9, random_state=150 ) lr_3 = LinearRegression() lr_3.fit(x_train_3, y_train_3) lr_3.score(x_test_3, y_test_3) * 100 y_predict_3 = lr_3.predict(x_test_3) plt.scatter(x_train_3, y_train_3, color="black") plt.plot(x_test_3, y_predict_3, color="red") plt.xlabel("Height") plt.ylabel("Weight") plt.plot
false
1
1,377
0
1,459
1,377
129482503
from IPython.core.display import HTML with open("./CSS.css", "r") as file: custom_css = file.read() HTML(custom_css) import os import numpy as np import pandas as pd import os from sklearn.model_selection import train_test_split import cv2 import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch from torch.utils.data import Dataset, DataLoader import torch.nn as nn import torch.nn.functional as F import matplotlib.pyplot as plt from tqdm.notebook import tqdm root = "/kaggle/input/image-matching-challenge-2023" train_label_file = "train_labels.csv" train_path = "/kaggle/input/image-matching-challenge-2023/train" test_path = "/kaggle/input/image-matching-challenge-2023/test" def get_datasets(root, path): file_path = os.path.join(root, path) df = pd.read_csv(file_path) return df get_datasets(train_path, train_label_file).head() train_transform = A.Compose( [ A.Resize(224, 224), A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0, ), ToTensorV2(), ] ) validation_transform = A.Compose( [ A.Resize(224, 224), A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0, ), ToTensorV2(), ] ) print("Follwing classes are there : \n", train_dataset.classes) def display_image(image, label): plt.imshow(image.permute(1, 2, 0)) display_image(*train_dataset[2]) class ImageDataset(Dataset): def __init__( self, root, img_path, rotation_label, translation_label, transforms=None ): self.root = root self.img_path = img_path self.rotation_label = rotation_label self.translation_label = translation_label self.transforms = transforms def __getitem__(self, index): img_path = self.img_path[index] img_path = self.root + img_path image = cv2.imread(img_path) if self.transforms is not None: image = self.transforms(image=image)["image"] rotation_label = self.rotation_label[index].split(";") rotation_label = list(map(float, rotation_label)) translation_label = self.translation_label[index].split(";") translation_label = list(map(float, translation_label)) return image, np.array(rotation_label), np.array(translation_label) def __len__(self): return len(self.img_path) def get_train_validation_set(df): validtion_data = df.sample(frac=0.3) training_data = df[~df["image_path"].isin(validtion_data["image_path"])] return training_data, validtion_data training_data, validation_data = get_train_validation_set( get_datasets(train_path, train_label_file) ) def get_translation_rotation(df): df["rotation_matrix_split"] = df.apply( lambda x: list(map(float, x["rotation_matrix"].split(";"))), axis=1 ) df["translation_vector_split"] = df.apply( lambda x: list(map(float, x["translation_vector"].split(";"))), axis=1 ) rotation_value = np.array(df["rotation_matrix_split"].tolist()) translation_value = np.array(df["translation_vector_split"].tolist()) return translation_value, rotation_value translation_value, rotation_value = get_translation_rotation( get_datasets(train_path, train_label_file) ) def get_train_dataset(train_path, traning_data): train_dataset = ImageDataset( train_path, training_data["image_path"].tolist(), training_data["rotation_matrix"].tolist(), training_data["translation_vector"].tolist(), transforms=train_transform, ) return train_dataset def get_validation_dataset(train_path, validation_data): validation_dataset = ImageDataset( train_path, validation_data["image_path"].tolist(), validation_data["rotation_matrix"].tolist(), 
validation_data["translation_vector"].tolist(), transforms=validation_transform, ) return validation_dataset def get_dataloader(train_dataset, validation_dataset): train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2) validation_loader = DataLoader(validation_dataset, batch_size=4, shuffle=False) return train_loader, validation_loader train_loader, validation_loader = get_dataloader( get_train_dataset(train_path, training_data), get_validation_dataset(train_path, validation_data), ) class ImagemtachingModel(torch.nn.Module): def __init__(self, dropout=0.2): super(ImagemtachingModel, self).__init__() self.layer_one = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_two = nn.Sequential( nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_three = nn.Sequential( nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_four = nn.Sequential( nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_five = nn.Sequential( nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), nn.AvgPool2d(kernel_size=7), ) self.layer_six = nn.Sequential( nn.Linear(512, 512), nn.ReLU(), ) self.rotation_out = nn.Linear(512, 9) self.tanh = torch.nn.Tanh() self.translation_out = torch.nn.Linear(512, 3) def forward(self, x): x = self.layer_one(x) x = self.layer_two(x) x = self.layer_three(x) x = self.layer_four(x) x = self.layer_five(x) x = x.view(-1, 512) x = self.layer_six(x) rotation_out = self.rotation_out(x) rotation_out = self.tanh(rotation_out) translation_out = self.translation_out(x) return rotation_out, translation_out device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) model = ImagemtachingModel(dropout=0.2) model.to(device) l1_distance = torch.nn.L1Loss() optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( optimizer, mode="min", factor=0.5, patience=2, threshold_mode="abs", min_lr=1e-8, verbose=True, ) best_loss = 1000000000 epochs = 10 best_model = None for epoch in range(1, epochs + 1): train_loss = [] rot_loss = [] trans_loss = [] val_loss = [] val_rot_loss = [] val_trans_loss = [] for imgs, rotation_labels, translation_labels in tqdm(train_loader): model.train() optimizer.zero_grad() imgs = imgs.to(device) rotation_labels = rotation_labels.to(device) translation_labels = translation_labels.to(device) 
rotation_output, translation_output = model(imgs) rotation_loss = l1_distance(rotation_output, rotation_labels) translation_loss = l1_distance(translation_output, translation_labels) loss = rotation_loss + translation_loss loss.backward() optimizer.step() train_loss.append(loss.item()) rot_loss.append(rotation_loss.item()) trans_loss.append(translation_loss.item()) for imgs, rotation_labels, translation_labels in tqdm(validation_loader): model.eval() imgs = imgs.to(device) rotation_labels = rotation_labels.to(device) translation_labels = translation_labels.to(device) rotation_output, translation_output = model(imgs) rotation_loss = l1_distance(rotation_output, rotation_labels) translation_loss = l1_distance(translation_output, translation_labels) loss = rotation_loss + translation_loss val_loss.append(loss.item()) val_rot_loss.append(rotation_loss.item()) val_trans_loss.append(translation_loss.item()) mtrain_loss = np.mean(train_loss) mval_loss = np.mean(val_loss) mtrain_rot_loss = np.mean(rot_loss) mtrain_trans_loss = np.mean(trans_loss) mval_rot_loss = np.mean(val_rot_loss) mval_trans_loss = np.mean(val_trans_loss) print( f"Epoch [{epoch}], Train Loss : [{mtrain_loss:.5f}] \ Train Rotation Loss : [{mtrain_rot_loss:.5f}] Train Translation Loss : [{mtrain_trans_loss:.5f}] \ Val Loss : [{mval_loss:.5f}] Val Rotation Loss : [{mval_rot_loss:.5f}] Val Translation Loss : [{mval_trans_loss:.5f}]" ) if scheduler is not None: scheduler.step(mval_loss) if mval_loss < best_loss: best_loss = mval_loss best_model = model
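One caveat in the loop above: best_model = model keeps a reference to the live model, so the "best" weights continue to change as training goes on. Below is a minimal, self-contained sketch of snapshotting the best state_dict instead; the helper name maybe_checkpoint, the toy nn.Linear model, and the file name best_model.pt are illustrative, not part of the original notebook.

import copy
import torch
import torch.nn as nn

def maybe_checkpoint(model, val_loss, best_loss, path="best_model.pt"):
    # Save a detached copy of the weights whenever validation loss improves.
    if val_loss < best_loss:
        torch.save(copy.deepcopy(model.state_dict()), path)
        return val_loss
    return best_loss

# Toy usage; a real run would pass the notebook's model and mval_loss once per epoch.
toy_model = nn.Linear(4, 2)
best = float("inf")
for fake_val_loss in [0.9, 0.7, 0.8]:
    best = maybe_checkpoint(toy_model, fake_val_loss, best)
print("best validation loss so far:", best)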
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/482/129482503.ipynb
null
null
[{"Id": 129482503, "ScriptId": 37430982, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3402809, "CreationDate": "05/14/2023 07:34:12", "VersionNumber": 12.0, "Title": "\ud83d\udd25Pytorch Image Matching \ud83d\udd25", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 290.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 284.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
from IPython.core.display import HTML with open("./CSS.css", "r") as file: custom_css = file.read() HTML(custom_css) import os import numpy as np import pandas as pd import os from sklearn.model_selection import train_test_split import cv2 import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch from torch.utils.data import Dataset, DataLoader import torch.nn as nn import torch.nn.functional as F import matplotlib.pyplot as plt from tqdm.notebook import tqdm root = "/kaggle/input/image-matching-challenge-2023" train_label_file = "train_labels.csv" train_path = "/kaggle/input/image-matching-challenge-2023/train" test_path = "/kaggle/input/image-matching-challenge-2023/test" def get_datasets(root, path): file_path = os.path.join(root, path) df = pd.read_csv(file_path) return df get_datasets(train_path, train_label_file).head() train_transform = A.Compose( [ A.Resize(224, 224), A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0, ), ToTensorV2(), ] ) validation_transform = A.Compose( [ A.Resize(224, 224), A.Normalize( mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0, ), ToTensorV2(), ] ) def display_image(image, label): plt.imshow(image.permute(1, 2, 0)) class ImageDataset(Dataset): def __init__( self, root, img_path, rotation_label, translation_label, transforms=None ): self.root = root self.img_path = img_path self.rotation_label = rotation_label self.translation_label = translation_label self.transforms = transforms def __getitem__(self, index): img_path = self.img_path[index] img_path = self.root + img_path image = cv2.imread(img_path) if self.transforms is not None: image = self.transforms(image=image)["image"] rotation_label = self.rotation_label[index].split(";") rotation_label = list(map(float, rotation_label)) translation_label = self.translation_label[index].split(";") translation_label = list(map(float, translation_label)) return image, np.array(rotation_label), np.array(translation_label) def __len__(self): return len(self.img_path) def get_train_validation_set(df): validation_data = df.sample(frac=0.3) training_data = df[~df["image_path"].isin(validation_data["image_path"])] return training_data, validation_data training_data, validation_data = get_train_validation_set( get_datasets(train_path, train_label_file) ) def get_translation_rotation(df): df["rotation_matrix_split"] = df.apply( lambda x: list(map(float, x["rotation_matrix"].split(";"))), axis=1 ) df["translation_vector_split"] = df.apply( lambda x: list(map(float, x["translation_vector"].split(";"))), axis=1 ) rotation_value = np.array(df["rotation_matrix_split"].tolist()) translation_value = np.array(df["translation_vector_split"].tolist()) return translation_value, rotation_value translation_value, rotation_value = get_translation_rotation( get_datasets(train_path, train_label_file) ) def get_train_dataset(train_path, training_data): train_dataset = ImageDataset( train_path, training_data["image_path"].tolist(), training_data["rotation_matrix"].tolist(), training_data["translation_vector"].tolist(), transforms=train_transform, ) return train_dataset def get_validation_dataset(train_path, validation_data): validation_dataset = ImageDataset( train_path, validation_data["image_path"].tolist(), validation_data["rotation_matrix"].tolist(), 
validation_data["translation_vector"].tolist(), transforms=validation_transform, ) return validation_dataset def get_dataloader(train_dataset, validation_dataset): train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2) validation_loader = DataLoader(validation_dataset, batch_size=4, shuffle=False) return train_loader, validation_loader train_loader, validation_loader = get_dataloader( get_train_dataset(train_path, training_data), get_validation_dataset(train_path, validation_data), ) class ImagemtachingModel(torch.nn.Module): def __init__(self, dropout=0.2): super(ImagemtachingModel, self).__init__() self.layer_one = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_two = nn.Sequential( nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(128), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_three = nn.Sequential( nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(256), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_four = nn.Sequential( nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), ) self.layer_five = nn.Sequential( nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU(), nn.Dropout(dropout), nn.MaxPool2d(kernel_size=2), nn.AvgPool2d(kernel_size=7), ) self.layer_six = nn.Sequential( nn.Linear(512, 512), nn.ReLU(), ) self.rotation_out = nn.Linear(512, 9) self.tanh = torch.nn.Tanh() self.translation_out = torch.nn.Linear(512, 3) def forward(self, x): x = self.layer_one(x) x = self.layer_two(x) x = self.layer_three(x) x = self.layer_four(x) x = self.layer_five(x) x = x.view(-1, 512) x = self.layer_six(x) rotation_out = self.rotation_out(x) rotation_out = self.tanh(rotation_out) translation_out = self.translation_out(x) return rotation_out, translation_out device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) model = ImagemtachingModel(dropout=0.2) model.to(device) l1_distance = torch.nn.L1Loss() optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( optimizer, mode="min", factor=0.5, patience=2, threshold_mode="abs", min_lr=1e-8, verbose=True, ) best_loss = 1000000000 epochs = 10 best_model = None for epoch in range(1, epochs + 1): train_loss = [] rot_loss = [] trans_loss = [] val_loss = [] val_rot_loss = [] val_trans_loss = [] for imgs, rotation_labels, translation_labels in tqdm(train_loader): model.train() optimizer.zero_grad() imgs = imgs.to(device) rotation_labels = rotation_labels.to(device) translation_labels = translation_labels.to(device) 
rotation_output, translation_output = model(imgs) rotation_loss = l1_distance(rotation_output, rotation_labels) translation_loss = l1_distance(translation_output, translation_labels) loss = rotation_loss + translation_loss loss.backward() optimizer.step() train_loss.append(loss.item()) rot_loss.append(rotation_loss.item()) trans_loss.append(translation_loss.item()) for imgs, rotation_labels, translation_labels in tqdm(validation_loader): model.eval() imgs = imgs.to(device) rotation_labels = rotation_labels.to(device) translation_labels = translation_labels.to(device) rotation_output, translation_output = model(imgs) rotation_loss = l1_distance(rotation_output, rotation_labels) translation_loss = l1_distance(translation_output, translation_labels) loss = rotation_loss + translation_loss val_loss.append(loss.item()) val_rot_loss.append(rotation_loss.item()) val_trans_loss.append(translation_loss.item()) mtrain_loss = np.mean(train_loss) mval_loss = np.mean(val_loss) mtrain_rot_loss = np.mean(rot_loss) mtrain_trans_loss = np.mean(trans_loss) mval_rot_loss = np.mean(val_rot_loss) mval_trans_loss = np.mean(val_trans_loss) print( f"Epoch [{epoch}], Train Loss : [{mtrain_loss:.5f}] \ Train Rotation Loss : [{mtrain_rot_loss:.5f}] Train Translation Loss : [{mtrain_trans_loss:.5f}] \ Val Loss : [{mval_loss:.5f}] Val Rotation Loss : [{mval_rot_loss:.5f}] Val Translation Loss : [{mval_trans_loss:.5f}]" ) if scheduler is not None: scheduler.step(mval_loss) if mval_loss < best_loss: best_loss = mval_loss best_model = model
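The validation pass in the script above calls model.eval() but still runs with autograd enabled, so activations are kept for gradients that are never used. A hedged, self-contained sketch of wrapping evaluation in torch.no_grad() follows; the toy model and random batches are placeholders, not the notebook's data.

import torch
import torch.nn as nn

model = nn.Linear(8, 3)
criterion = nn.L1Loss()

@torch.no_grad()  # no gradient bookkeeping during evaluation
def validate(model, batches):
    model.eval()
    losses = [criterion(model(x), y).item() for x, y in batches]
    return sum(losses) / len(losses)

fake_batches = [(torch.randn(4, 8), torch.randn(4, 3)) for _ in range(3)]
print("mean validation L1 loss:", validate(model, fake_batches))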
false
0
3,197
0
3,197
3,197
129482192
<jupyter_start><jupyter_text>Chatbot Dataset Topical Chat This is a Topical Chat dataset from Amazon! It consists of over 8000 conversations and over 184000 messages! Within each message, there is: A conversation id, which is basically which conversation the message takes place in. Each message is either the start of a conversation or a reply from the previous message. There is also a sentiment, which represents the emotion that the person who sent the message is feeling. There are 8 sentiments: Angry, Curious to Dive Deeper, Disguised, Fearful, Happy, Sad, and Surprised. This dataset can be used in machine learning to simulate a conversation or to make a chatbot. It can also be used for data visualization, for example you could visualize the word usage for the different emotions. PS: If you cannot download the dataset, download it from here: https://docs.google.com/spreadsheets/d/1dFdlvgmyXfN3SriVn5Byv_BNtyroICxdgrQKBzuMA1U/edit?usp=sharing Original github dataset: https://github.com/alexa/Topical-Chat Kaggle dataset identifier: chatbot-dataset-topical-chat <jupyter_script># #### Importing Libraries and Installing Dependencies import torch import pandas as pd import numpy as np import transformers import random from transformers import GPT2Tokenizer, GPT2LMHeadModel from torch.utils.data import Dataset, DataLoader from tqdm.notebook import tqdm # #### Downloading Topical Chat Data # https://www.kaggle.com/datasets/arnavsharmaas/chatbot-dataset-topical-chat # Next, let's load the data and preprocess it. In this example, we will only consider the text. We will also remove any comments that are too long or too short. def load_data(): df = pd.read_csv("/kaggle/input/chatbot-dataset-topical-chat/topical_chat.csv") comments = df["message"].tolist() comments = [c.strip() for c in comments] comments = [c for c in comments if len(c) > 10 and len(c) < 100] return comments comments = load_data() comments[1:5] # #### Preprocessing Data # Now, let's tokenize the data using the GPT2 tokenizer. tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token def tokenize_data(data): tokenized_data = tokenizer.batch_encode_plus( data, padding=True, return_tensors="pt" ) return tokenized_data tokenized_data = tokenize_data(comments) # We will now define a PyTorch dataset and a dataloader to feed the tokenized data into the model during training. 
class topicalDataset(Dataset): def __init__(self, tokenized_data, block_size): self.tokenized_data = tokenized_data self.block_size = block_size def __len__(self): return len(self.tokenized_data["input_ids"]) def __getitem__(self, index): return { "input_ids": self.tokenized_data["input_ids"][index], "attention_mask": self.tokenized_data["attention_mask"][index], } block_size = 128 dataset = topicalDataset(tokenized_data, block_size) dataloader = DataLoader(dataset, batch_size=32, shuffle=True) # #### Training the LLM model = GPT2LMHeadModel.from_pretrained("gpt2") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) epochs = 3 learning_rate = 5e-5 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) scheduler = transformers.get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=len(dataloader) * epochs ) def train(): model.train() total_loss = 0 for batch in tqdm(dataloader): input_ids = batch["input_ids"].to(device) attention_mask = batch["attention_mask"].to(device) optimizer.zero_grad() outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) loss = outputs[0] total_loss += loss.item() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() avg_loss = total_loss / len(dataloader) print(f"Training loss: {avg_loss:.2f}") # #### Generating Responses def generate(prompt, max_length=50): model.eval() encoded_prompt = tokenizer.encode( prompt, add_special_tokens=True, padding="max_length", max_length=max_length, return_tensors="pt", truncation=True, ) input_ids = encoded_prompt.to(device) attention_mask = (input_ids != tokenizer.pad_token_id).to(device) output_sequences = model.generate( input_ids=input_ids, attention_mask=attention_mask, max_length=max_length + len(encoded_prompt[0]), temperature=1.0, top_k=0, top_p=0.9, repetition_penalty=1.0, do_sample=True, num_return_sequences=1, ) generated_sequence = output_sequences[0] generated_sequence = generated_sequence.tolist() text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) text = text[ len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) : ] return text generate("Do you like dance?")
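As written, the script defines train() but never calls it, so generate() would sample from the unmodified GPT-2 weights. A plausible driver loop is sketched below; it reuses the notebook's own train(), generate(), and epochs, and the per-epoch printout is an assumption about the intended usage rather than original code.

for epoch in range(epochs):
    print(f"Epoch {epoch + 1}/{epochs}")
    train()  # one pass over the dataloader, updating the GPT-2 weights

print(generate("Do you like dance?"))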
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/482/129482192.ipynb
chatbot-dataset-topical-chat
arnavsharmaas
[{"Id": 129482192, "ScriptId": 38497778, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4672638, "CreationDate": "05/14/2023 07:31:05", "VersionNumber": 3.0, "Title": "LLM ChatBot", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 142.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 142.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185582222, "KernelVersionId": 129482192, "SourceDatasetVersionId": 1765643}]
[{"Id": 1765643, "DatasetId": 1049526, "DatasourceVersionId": 1802912, "CreatorUserId": 5939391, "LicenseName": "Unknown", "CreationDate": "12/20/2020 21:46:07", "VersionNumber": 1.0, "Title": "Chatbot Dataset Topical Chat", "Slug": "chatbot-dataset-topical-chat", "Subtitle": "Over 8000 conversations", "Description": "This is a Topical Chat dataset from Amazon! It consists of over 8000 conversations and over 184000 messages! \n\nWithin each message, there is: A conversation id, which is basically which conversation the message takes place in. Each message is either the start of a conversation or a reply from the previous message. There is also a sentiment, which represents the emotion that the person who sent the message is feeling. There are 8 sentiments: Angry, Curious to Dive Deeper, Disguised, Fearful, Happy, Sad, and Surprised.\n\nThis dataset can be used in machine learning to simulate a conversation or to make a chatbot. It can also be used for data visualization, for example you could visualize the word usage for the different emotions. \n\nPS: If you cannot download the dataset, download it from here:\nhttps://docs.google.com/spreadsheets/d/1dFdlvgmyXfN3SriVn5Byv_BNtyroICxdgrQKBzuMA1U/edit?usp=sharing\n\nOriginal github dataset:\nhttps://github.com/alexa/Topical-Chat", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1049526, "CreatorUserId": 5939391, "OwnerUserId": 5939391.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1765643.0, "CurrentDatasourceVersionId": 1802912.0, "ForumId": 1066530, "Type": 2, "CreationDate": "12/20/2020 21:46:07", "LastActivityDate": "12/20/2020", "TotalViews": 25728, "TotalDownloads": 2691, "TotalVotes": 36, "TotalKernels": 6}]
[{"Id": 5939391, "UserName": "arnavsharmaas", "DisplayName": "Arnav Sharma AS", "RegisterDate": "10/12/2020", "PerformanceTier": 0}]
# #### Importing Libraries and Installing Dependencies import torch import pandas as pd import numpy as np import transformers import random from transformers import GPT2Tokenizer, GPT2LMHeadModel from torch.utils.data import Dataset, DataLoader from tqdm.notebook import tqdm # #### Downloading Topical Chat Data # https://www.kaggle.com/datasets/arnavsharmaas/chatbot-dataset-topical-chat # Next, let's load the data and preprocess it. In this example, we will only consider the text. We will also remove any comments that are too long or too short. def load_data(): df = pd.read_csv("/kaggle/input/chatbot-dataset-topical-chat/topical_chat.csv") comments = df["message"].tolist() comments = [c.strip() for c in comments] comments = [c for c in comments if len(c) > 10 and len(c) < 100] return comments comments = load_data() comments[1:5] # #### Preprocessing Data # Now, let's tokenize the data using the GPT2 tokenizer. tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token def tokenize_data(data): tokenized_data = tokenizer.batch_encode_plus( data, padding=True, return_tensors="pt" ) return tokenized_data tokenized_data = tokenize_data(comments) # We will now define a PyTorch dataset and a dataloader to feed the tokenized data into the model during training. class topicalDataset(Dataset): def __init__(self, tokenized_data, block_size): self.tokenized_data = tokenized_data self.block_size = block_size def __len__(self): return len(self.tokenized_data["input_ids"]) def __getitem__(self, index): return { "input_ids": self.tokenized_data["input_ids"][index], "attention_mask": self.tokenized_data["attention_mask"][index], } block_size = 128 dataset = topicalDataset(tokenized_data, block_size) dataloader = DataLoader(dataset, batch_size=32, shuffle=True) # #### Training the LLM model = GPT2LMHeadModel.from_pretrained("gpt2") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) epochs = 3 learning_rate = 5e-5 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) scheduler = transformers.get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=len(dataloader) * epochs ) def train(): model.train() total_loss = 0 for batch in tqdm(dataloader): input_ids = batch["input_ids"].to(device) attention_mask = batch["attention_mask"].to(device) optimizer.zero_grad() outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) loss = outputs[0] total_loss += loss.item() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() avg_loss = total_loss / len(dataloader) print(f"Training loss: {avg_loss:.2f}") # #### Generating Responses def generate(prompt, max_length=50): model.eval() encoded_prompt = tokenizer.encode( prompt, add_special_tokens=True, padding="max_length", max_length=max_length, return_tensors="pt", truncation=True, ) input_ids = encoded_prompt.to(device) attention_mask = (input_ids != tokenizer.pad_token_id).to(device) output_sequences = model.generate( input_ids=input_ids, attention_mask=attention_mask, max_length=max_length + len(encoded_prompt[0]), temperature=1.0, top_k=0, top_p=0.9, repetition_penalty=1.0, do_sample=True, num_return_sequences=1, ) generated_sequence = output_sequences[0] generated_sequence = generated_sequence.tolist() text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) text = text[ len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) : ] return text 
generate("Do you like dance?")
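A natural follow-up to the fine-tuning above is persisting the model so it can be reloaded without retraining. The sketch below uses the standard save_pretrained / from_pretrained calls on the notebook's model and tokenizer; the output directory name is an arbitrary example.

output_dir = "./gpt2-topical-chat"  # illustrative path
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)

# Later, or in a fresh session:
from transformers import GPT2LMHeadModel, GPT2Tokenizer
reloaded_model = GPT2LMHeadModel.from_pretrained(output_dir)
reloaded_tokenizer = GPT2Tokenizer.from_pretrained(output_dir)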
false
1
1,138
0
1,424
1,138
129836364
# # Titanic Machine Learning Challenge # Titanic Machine learning # The aim of this challenge is to predict whether or not a passenger will survive the Titanic disaster. # The Titanic was carrying approximatly 2200 people, of which 1300 were passangers. The 20 lifeboats were only able to carry 1178 people. However, the life boats were not fully loaded with passengers since the crew were afraid the davits would not support the weight of a fully loaded boat. This resulted in only 705 people being rescued in lifeboats. The Titanic disaster lead to more than 1503 fatalities (815 passengers and 688 crew). The crew had 700 fatalities. Third class passengers suffered the greatest loss of aproximatly 700 fatalities, only 174 third class passangers survived. It is claimed that the steerage* passengers were prevented from boarding boats. However, this claim was largly dispelled since the general alarm was sounded too late so some third class passangers did not realise the direness of the situation before it was too late. The large number of fatalites is also due to passangers finding it difficult to navigate the complex lower levels of the ship which meant they reached the deck after the lifeboats had been launched. Many women also refused to leave their husbands and sons behind. 31.6% is the total percentage of passengers and crew who survived. # The challenge is to accuratly predict if a person will survive. # *The part of a ship providing the cheapest accommodation for passengers https://www.britannica.com/topic/Titanic/Discovery-and-legacy https://titanicfacts.net/ https://www.rmg.co.uk/stories/topics/rms-titanic-facts # # Import the data import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.feature_selection import mutual_info_classif as MIC from sklearn.feature_selection import mutual_info_regression as MIR from sklearn import tree from sklearn import metrics import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Load training data trainDF = pd.read_csv("/kaggle/input/titanic/train.csv") print("Train data shape: ", trainDF.shape) # View data headings trainDF.head() # Load test data testDF = pd.read_csv("/kaggle/input/titanic/test.csv") print("Test data shape: ", testDF.shape) # View data headings testDF.head() # # Format and explore the data # This is binary classification (supervised learning) problem. Use logistic regression or decision tree. Use one hot encoding. # Firts check which data will be most usefull with mutual information. # The name of a passanger should not determine if they survived or not. # From a visual inspection the ticket information seems to provide a ticket number and details of embarking or disembarking. This informaiton will not add value. # The cabin might if the ticket holder was in their cabin at the time. # The ticket fare will relate more to the class and therfore survival than the actual value of the ticket. 
# The point of embarkment will not affect if they survive or not since this will not determine their location on the boat at the time of sinking. # It is suspected that the age, class, sex and if they are travling with a child will carry the most influence in if the passanger survived or not. # Drop the columns that will not add value from the initial suspicion trainDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True) # For NaN age replace with the average age of people on the ship trainDF["Age"].fillna(trainDF["Age"].mean(), inplace=True) # Make all float values (age) into integer trainDF["Age"] = (trainDF["Age"] * 100).astype("int") # The cabin were assigend as # deck T : Boat deck - 1 passanger # First class # Deck A : 42 passangers # Deck B : 123 passangers # Deck C : 310 passangers # Deck D : 117 passangers # Deck E : 97 passangers # Second class # Deck D : 118 passangers # Deck E : 226 passangers # Deck F : 218 passangers # Deck G : 112 passangers # Third class # Deck D : 50 passangers # Deck E : 260 passangers # Deck F : 466 passangers # Deck G : 250 passangers # Ref # #https://www.scribd.com/document/392911689/Cabin-Allocations# # #https://titanic.fandom.com/wiki/Second_Class_Cabins#:~:text=D%2DDeck%3A%20At%20the%20stern,%2D43%20to%20E%2D107. # Select cabin zone trainDF["Deck"] = trainDF["Cabin"].astype(str).str[0] trainDF.drop(columns="Cabin", inplace=True) # For NaN Cabin replace with the most likely cabin location trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 1), "C", trainDF["Deck"] ) trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 2), "E", trainDF["Deck"] ) trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 3), "F", trainDF["Deck"] ) # One hot encode the text data print(trainDF.columns) trainDF = pd.get_dummies(data=trainDF, columns=["Sex"], drop_first=True, dtype="int8") trainDF = pd.get_dummies(data=trainDF, prefix="Deck", prefix_sep="", columns=["Deck"]) trainDF = pd.get_dummies(data=trainDF, prefix="SibSp", prefix_sep="", columns=["SibSp"]) trainDF = pd.get_dummies(data=trainDF, prefix="Parch", prefix_sep="", columns=["Parch"]) print(trainDF) # Get an average mutual information score to highlight important features miScores = MIC(trainDF, trainDF["Survived"]) # , discrete_features=True) miScoreSer = pd.Series(miScores, index=trainDF.columns) print(miScoreSer) # # Set train and test data # Select a sample of 90% of the data frame to train with xTrainDF = trainDF.sample(frac=0.9) print(xTrainDF.shape) # Use the remaining 10% of the data frame to check model with xValDF = pd.concat([xTrainDF, trainDF]).drop_duplicates(keep=False) print(xValDF.shape) # Get training and testing y yTrainDF = xTrainDF["Survived"] xTrainDF.drop(columns=["Survived"], inplace=True) yValDF = xValDF["Survived"] xValDF.drop(columns=["Survived"], inplace=True) # From the mi-score, the passanger sex, class and age are most likely to contribute to the survival rate. The cabin location and travel partner also have an influence. 
# # Decision tree # Fit the decision tree model treeClassifier = tree.DecisionTreeClassifier() treeClassifier = treeClassifier.fit(xTrainDF, yTrainDF) # tree.plot_tree(treeClassifier) # Predict the outcome treePredict = treeClassifier.predict(xValDF) # https://www.simplilearn.com/tutorials/scikit-learn-tutorial/sklearn-decision-trees confusion_matrix = metrics.confusion_matrix(yValDF, treePredict) labels = yValDF.unique() matrix_df = pd.DataFrame(confusion_matrix) sns.set(font_scale=1.3) plt.figure(figsize=(10, 7)) ax = plt.axes() sns.heatmap(matrix_df, annot=True, fmt="g", ax=ax, cmap="magma") ax.set_title("Confusion Matrix - Decision Tree") ax.set_xlabel("Predicted label", fontsize=15) ax.set_xticklabels([""] + list(labels)) ax.set_ylabel("True Label", fontsize=15) ax.set_yticklabels(list(labels), rotation=0) plt.show() # # Format test data # Remove unused parameters testDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True) # Format Age testDF["Age"].fillna(testDF["Age"].mean(), inplace=True) testDF["Age"] = (testDF["Age"] * 100).astype("int") # Select cabin zone testDF["Deck"] = testDF["Cabin"].astype(str).str[0] testDF.drop(columns="Cabin", inplace=True) # For NaN Cabin replace with the most likely cabin location testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 1), "C", testDF["Deck"] ) testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 2), "E", testDF["Deck"] ) testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 3), "F", testDF["Deck"] ) # One hot encode the text data print(testDF.columns) testDF = pd.get_dummies(data=testDF, columns=["Sex"], drop_first=True, dtype="int8") testDF = pd.get_dummies(data=testDF, prefix="Deck", prefix_sep="", columns=["Deck"]) testDF = pd.get_dummies(data=testDF, prefix="SibSp", prefix_sep="", columns=["SibSp"]) testDF = pd.get_dummies(data=testDF, prefix="Parch", prefix_sep="", columns=["Parch"]) print(testDF) # Align the one-hot encoded test columns with the training feature columns testDF = testDF.reindex(columns=xTrainDF.columns, fill_value=0) treePredict = treeClassifier.predict(testDF) # Generate results file output = pd.DataFrame({"PassengerId": testDF.PassengerId, "Survived": treePredict}) output.to_csv("ClassificationTry2.csv", index=False) print("Your file was successfully saved!")
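The confusion matrix above is informative, but a single accuracy number makes the hold-out result easier to compare across runs. The short addition below reuses treeClassifier, xValDF, and yValDF from the notebook; it is a suggested check, not part of the original submission code.

from sklearn import metrics

val_predictions = treeClassifier.predict(xValDF)
print("Validation accuracy:", metrics.accuracy_score(yValDF, val_predictions))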
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/836/129836364.ipynb
null
null
[{"Id": 129836364, "ScriptId": 38613574, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11417667, "CreationDate": "05/16/2023 20:50:06", "VersionNumber": 1.0, "Title": "TitanicML_try2_2023-05-16", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 230.0, "LinesInsertedFromPrevious": 230.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Titanic Machine Learning Challenge # Titanic Machine learning # The aim of this challenge is to predict whether or not a passenger will survive the Titanic disaster. # The Titanic was carrying approximatly 2200 people, of which 1300 were passangers. The 20 lifeboats were only able to carry 1178 people. However, the life boats were not fully loaded with passengers since the crew were afraid the davits would not support the weight of a fully loaded boat. This resulted in only 705 people being rescued in lifeboats. The Titanic disaster lead to more than 1503 fatalities (815 passengers and 688 crew). The crew had 700 fatalities. Third class passengers suffered the greatest loss of aproximatly 700 fatalities, only 174 third class passangers survived. It is claimed that the steerage* passengers were prevented from boarding boats. However, this claim was largly dispelled since the general alarm was sounded too late so some third class passangers did not realise the direness of the situation before it was too late. The large number of fatalites is also due to passangers finding it difficult to navigate the complex lower levels of the ship which meant they reached the deck after the lifeboats had been launched. Many women also refused to leave their husbands and sons behind. 31.6% is the total percentage of passengers and crew who survived. # The challenge is to accuratly predict if a person will survive. # *The part of a ship providing the cheapest accommodation for passengers https://www.britannica.com/topic/Titanic/Discovery-and-legacy https://titanicfacts.net/ https://www.rmg.co.uk/stories/topics/rms-titanic-facts # # Import the data import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.feature_selection import mutual_info_classif as MIC from sklearn.feature_selection import mutual_info_regression as MIR from sklearn import tree from sklearn import metrics import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Load training data trainDF = pd.read_csv("/kaggle/input/titanic/train.csv") print("Train data shape: ", trainDF.shape) # View data headings trainDF.head() # Load test data testDF = pd.read_csv("/kaggle/input/titanic/test.csv") print("Test data shape: ", testDF.shape) # View data headings testDF.head() # # Format and explore the data # This is binary classification (supervised learning) problem. Use logistic regression or decision tree. Use one hot encoding. # Firts check which data will be most usefull with mutual information. # The name of a passanger should not determine if they survived or not. # From a visual inspection the ticket information seems to provide a ticket number and details of embarking or disembarking. This informaiton will not add value. # The cabin might if the ticket holder was in their cabin at the time. # The ticket fare will relate more to the class and therfore survival than the actual value of the ticket. 
# The point of embarkment will not affect if they survive or not since this will not determine their location on the boat at the time of sinking. # It is suspected that the age, class, sex and if they are travling with a child will carry the most influence in if the passanger survived or not. # Drop the columns that will not add value from the initial suspicion trainDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True) # For NaN age replace with the average age of people on the ship trainDF["Age"].fillna(trainDF["Age"].mean(), inplace=True) # Make all float values (age) into integer trainDF["Age"] = (trainDF["Age"] * 100).astype("int") # The cabin were assigend as # deck T : Boat deck - 1 passanger # First class # Deck A : 42 passangers # Deck B : 123 passangers # Deck C : 310 passangers # Deck D : 117 passangers # Deck E : 97 passangers # Second class # Deck D : 118 passangers # Deck E : 226 passangers # Deck F : 218 passangers # Deck G : 112 passangers # Third class # Deck D : 50 passangers # Deck E : 260 passangers # Deck F : 466 passangers # Deck G : 250 passangers # Ref # #https://www.scribd.com/document/392911689/Cabin-Allocations# # #https://titanic.fandom.com/wiki/Second_Class_Cabins#:~:text=D%2DDeck%3A%20At%20the%20stern,%2D43%20to%20E%2D107. # Select cabin zone trainDF["Deck"] = trainDF["Cabin"].astype(str).str[0] trainDF.drop(columns="Cabin", inplace=True) # For NaN Cabin replace with the most likely cabin location trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 1), "C", trainDF["Deck"] ) trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 2), "E", trainDF["Deck"] ) trainDF["Deck"] = np.where( (trainDF["Deck"] == "n") & (trainDF["Pclass"] == 3), "F", trainDF["Deck"] ) # One hot encode the text data print(trainDF.columns) trainDF = pd.get_dummies(data=trainDF, columns=["Sex"], drop_first=True, dtype="int8") trainDF = pd.get_dummies(data=trainDF, prefix="Deck", prefix_sep="", columns=["Deck"]) trainDF = pd.get_dummies(data=trainDF, prefix="SibSp", prefix_sep="", columns=["SibSp"]) trainDF = pd.get_dummies(data=trainDF, prefix="Parch", prefix_sep="", columns=["Parch"]) print(trainDF) # Get an average mutual information score to highlight important features miScores = MIC(trainDF, trainDF["Survived"]) # , discrete_features=True) miScoreSer = pd.Series(miScores, index=trainDF.columns) print(miScoreSer) # # Set train and test data # Select a sample of 90% of the data frame to train with xTrainDF = trainDF.sample(frac=0.9) print(xTrainDF.shape) # Use the remaining 10% of the data frame to check model with xValDF = pd.concat([xTrainDF, trainDF]).drop_duplicates(keep=False) print(xValDF.shape) # Get training and testing y yTrainDF = xTrainDF["Survived"] xTrainDF.drop(columns=["Survived"], inplace=True) yValDF = xValDF["Survived"] xValDF.drop(columns=["Survived"], inplace=True) # From the mi-score, the passanger sex, class and age are most likely to contribute to the survival rate. The cabin location and travel partner also have an influence. 
# # Decision tree # Fit the decision tree model treeClassifier = tree.DecisionTreeClassifier() treeClassifier = treeClassifier.fit(xTrainDF, yTrainDF) # tree.plot_tree(treeClassifier) # Predict the outcome treePredict = treeClassifier.predict(xValDF) # https://www.simplilearn.com/tutorials/scikit-learn-tutorial/sklearn-decision-trees confusion_matrix = metrics.confusion_matrix(yValDF, treePredict) labels = yValDF.unique() matrix_df = pd.DataFrame(confusion_matrix) sns.set(font_scale=1.3) plt.figure(figsize=(10, 7)) ax = plt.axes() sns.heatmap(matrix_df, annot=True, fmt="g", ax=ax, cmap="magma") ax.set_title("Confusion Matrix - Decision Tree") ax.set_xlabel("Predicted label", fontsize=15) ax.set_xticklabels([""] + list(labels)) ax.set_ylabel("True Label", fontsize=15) ax.set_yticklabels(list(labels), rotation=0) plt.show() # # Format test data # Remove unused parameters testDF.drop(columns=["Name", "Ticket", "Fare", "Embarked"], inplace=True) # Format Age testDF["Age"].fillna(testDF["Age"].mean(), inplace=True) testDF["Age"] = (testDF["Age"] * 100).astype("int") # Select cabin zone testDF["Deck"] = testDF["Cabin"].astype(str).str[0] testDF.drop(columns="Cabin", inplace=True) # For NaN Cabin replace with the most likely cabin location testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 1), "C", testDF["Deck"] ) testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 2), "E", testDF["Deck"] ) testDF["Deck"] = np.where( (testDF["Deck"] == "n") & (testDF["Pclass"] == 3), "F", testDF["Deck"] ) # One hot encode the text data print(testDF.columns) testDF = pd.get_dummies(data=testDF, columns=["Sex"], drop_first=True, dtype="int8") testDF = pd.get_dummies(data=testDF, prefix="Deck", prefix_sep="", columns=["Deck"]) testDF = pd.get_dummies(data=testDF, prefix="SibSp", prefix_sep="", columns=["SibSp"]) testDF = pd.get_dummies(data=testDF, prefix="Parch", prefix_sep="", columns=["Parch"]) print(testDF) # Align the one-hot encoded test columns with the training feature columns testDF = testDF.reindex(columns=xTrainDF.columns, fill_value=0) treePredict = treeClassifier.predict(testDF) # Generate results file output = pd.DataFrame({"PassengerId": testDF.PassengerId, "Survived": treePredict}) output.to_csv("ClassificationTry2.csv", index=False) print("Your file was successfully saved!")
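Because the 90/10 split is drawn once, the validation score can swing noticeably between runs. A hedged sketch of 5-fold cross-validation on the already-encoded trainDF is shown below; the fold count and the plain DecisionTreeClassifier settings are illustrative choices, not from the original notebook.

from sklearn.model_selection import cross_val_score
from sklearn import tree

features = trainDF.drop(columns=["Survived"])
target = trainDF["Survived"]
cv_scores = cross_val_score(tree.DecisionTreeClassifier(), features, target, cv=5)
print("5-fold accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))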
false
0
2,787
0
2,787
2,787
129841155
<jupyter_start><jupyter_text>Auto-mpg dataset ### Context The data is technical spec of cars. The dataset is downloaded from UCI Machine Learning Repository ### Content 1. Title: Auto-Mpg Data 2. Sources: (a) Origin: This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. The dataset was used in the 1983 American Statistical Association Exposition. (c) Date: July 7, 1993 3. Past Usage: - See 2b (above) - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. 4. Relevant Information: This dataset is a slightly modified version of the dataset provided in the StatLib library. In line with the use by Ross Quinlan (1993) in predicting the attribute "mpg", 8 of the original instances were removed because they had unknown values for the "mpg" attribute. The original dataset is available in the file "auto-mpg.data-original". "The data concerns city-cycle fuel consumption in miles per gallon, to be predicted in terms of 3 multivalued discrete and 5 continuous attributes." (Quinlan, 1993) 5. Number of Instances: 398 6. Number of Attributes: 9 including the class attribute 7. Attribute Information: 1. mpg: continuous 2. cylinders: multi-valued discrete 3. displacement: continuous 4. horsepower: continuous 5. weight: continuous 6. acceleration: continuous 7. model year: multi-valued discrete 8. origin: multi-valued discrete 9. car name: string (unique for each instance) 8. Missing Attribute Values: horsepower has 6 missing values Kaggle dataset identifier: autompg-dataset <jupyter_script>import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # warning import warnings warnings.filterwarnings("ignore") # mpg: The "mpg" column represents the miles per gallon, indicating how many miles a vehicle can travel on one gallon of fuel. It is a continuous value. # cylinders: The "cylinders" column denotes the number of cylinders in the engine of the vehicle. It is a multi-valued discrete feature, indicating the count of cylinders, such as 3, 4, 5, 6, 8. # displacement: The "displacement" column represents the total volume swept by all the cylinders in the engine. It is a continuous value and is typically measured in liters. # horsepower: The "horsepower" column indicates the power output of the vehicle's engine. It is a continuous value, representing the strength of the engine. # weight: The "weight" column represents the weight of the vehicle. It is a continuous value, often measured in pounds or kilograms. # acceleration: The "acceleration" column denotes the time it takes for the vehicle to reach a certain speed. It is a continuous value, indicating the rate of change of velocity. # model year: The "model year" column represents the year in which the vehicle was manufactured. It is a multi-valued discrete feature, indicating different years of production. # origin: The "origin" column indicates the geographic origin or manufacturing region of the vehicle. It is a multi-valued discrete feature, representing different countries or regions. # car name: The "car name" column specifies the unique name of each vehicle instance. 
It is a string feature, providing a distinct identifier for each car df = pd.read_csv("/kaggle/input/autompg-dataset/auto-mpg.csv") df.head() df.cylinders.value_counts()
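The dataset notes above report six missing horsepower values. If, as is common for this file, they are stored as a non-numeric placeholder such as "?", the column loads as strings; the sketch below coerces it to numeric and imputes the gaps. The "?" encoding and the median imputation are assumptions, not something the notebook states.

# Coerce horsepower to numeric; unparseable placeholders (e.g. "?") become NaN.
df["horsepower"] = pd.to_numeric(df["horsepower"], errors="coerce")
print("missing horsepower rows:", df["horsepower"].isna().sum())
df["horsepower"] = df["horsepower"].fillna(df["horsepower"].median())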
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/841/129841155.ipynb
autompg-dataset
null
[{"Id": 129841155, "ScriptId": 38616820, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12449603, "CreationDate": "05/16/2023 22:01:24", "VersionNumber": 1.0, "Title": "Vehicle Fuel Consumption (EDA-ML)", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 39.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186225522, "KernelVersionId": 129841155, "SourceDatasetVersionId": 2704}]
[{"Id": 2704, "DatasetId": 1489, "DatasourceVersionId": 2704, "CreatorUserId": 693660, "LicenseName": "CC0: Public Domain", "CreationDate": "07/02/2017 05:25:54", "VersionNumber": 3.0, "Title": "Auto-mpg dataset", "Slug": "autompg-dataset", "Subtitle": "Mileage per gallon performances of various cars", "Description": "### Context\n\nThe data is technical spec of cars. The dataset is downloaded from UCI Machine Learning Repository\n\n\n### Content\n\n1. Title: Auto-Mpg Data\n\n2. Sources:\n (a) Origin: This dataset was taken from the StatLib library which is\n maintained at Carnegie Mellon University. The dataset was \n used in the 1983 American Statistical Association Exposition.\n (c) Date: July 7, 1993\n\n3. Past Usage:\n - See 2b (above)\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning.\n In Proceedings on the Tenth International Conference of Machine \n Learning, 236-243, University of Massachusetts, Amherst. Morgan\n Kaufmann.\n\n4. Relevant Information:\n\n This dataset is a slightly modified version of the dataset provided in\n the StatLib library. In line with the use by Ross Quinlan (1993) in\n predicting the attribute \"mpg\", 8 of the original instances were removed \n because they had unknown values for the \"mpg\" attribute. The original \n dataset is available in the file \"auto-mpg.data-original\".\n\n \"The data concerns city-cycle fuel consumption in miles per gallon,\n to be predicted in terms of 3 multivalued discrete and 5 continuous\n attributes.\" (Quinlan, 1993)\n\n5. Number of Instances: 398\n\n6. Number of Attributes: 9 including the class attribute\n\n7. Attribute Information:\n\n 1. mpg: continuous\n 2. cylinders: multi-valued discrete\n 3. displacement: continuous\n 4. horsepower: continuous\n 5. weight: continuous\n 6. acceleration: continuous\n 7. model year: multi-valued discrete\n 8. origin: multi-valued discrete\n 9. car name: string (unique for each instance)\n\n8. Missing Attribute Values: horsepower has 6 missing values\n\n### Acknowledgements\n\nDataset: UCI Machine Learning Repository \nData link : https://archive.ics.uci.edu/ml/datasets/auto+mpg\n\n\n### Inspiration\n\nI have used this dataset for practicing my exploratory analysis skills.", "VersionNotes": "Auto-mpg.csv with header", "TotalCompressedBytes": 18131.0, "TotalUncompressedBytes": 18131.0}]
[{"Id": 1489, "CreatorUserId": 693660, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 2704.0, "CurrentDatasourceVersionId": 2704.0, "ForumId": 4406, "Type": 2, "CreationDate": "06/28/2017 10:09:21", "LastActivityDate": "02/05/2018", "TotalViews": 274788, "TotalDownloads": 40258, "TotalVotes": 275, "TotalKernels": 260}]
null
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # warning import warnings warnings.filterwarnings("ignore") # mpg: The "mpg" column represents the miles per gallon, indicating how many miles a vehicle can travel on one gallon of fuel. It is a continuous value. # cylinders: The "cylinders" column denotes the number of cylinders in the engine of the vehicle. It is a multi-valued discrete feature, indicating the count of cylinders, such as 3, 4, 5, 6, 8. # displacement: The "displacement" column represents the total volume swept by all the cylinders in the engine. It is a continuous value and is typically measured in liters. # horsepower: The "horsepower" column indicates the power output of the vehicle's engine. It is a continuous value, representing the strength of the engine. # weight: The "weight" column represents the weight of the vehicle. It is a continuous value, often measured in pounds or kilograms. # acceleration: The "acceleration" column denotes the time it takes for the vehicle to reach a certain speed. It is a continuous value, indicating the rate of change of velocity. # model year: The "model year" column represents the year in which the vehicle was manufactured. It is a multi-valued discrete feature, indicating different years of production. # origin: The "origin" column indicates the geographic origin or manufacturing region of the vehicle. It is a multi-valued discrete feature, representing different countries or regions. # car name: The "car name" column specifies the unique name of each vehicle instance. It is a string feature, providing a distinct identifier for each car df = pd.read_csv("/kaggle/input/autompg-dataset/auto-mpg.csv") df.head() df.cylinders.value_counts()
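Seaborn and matplotlib are imported above but not yet used. A quick, hedged EDA step that fits this notebook is a correlation heatmap over the numeric columns; the figure size and colormap are arbitrary choices.

numeric_df = df.select_dtypes(include="number")
plt.figure(figsize=(8, 6))
sns.heatmap(numeric_df.corr(), annot=True, fmt=".2f", cmap="coolwarm")
plt.title("Correlation between numeric auto-mpg features")
plt.show()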
false
0
485
0
980
485
129841525
<jupyter_start><jupyter_text>College Basketball Dataset ### Content Data from the 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, and 2021 Division I college basketball seasons. cbb.csv has seasons 2013-2019 and seasons 2021-2023 combined The 2020 season's data set is kept separate from the other seasons, because there was no postseason due to the Coronavirus. ###Variables RK (Only in cbb20): The ranking of the team at the end of the regular season according to barttorvik TEAM: The Division I college basketball school CONF: The Athletic Conference in which the school participates in (A10 = Atlantic 10, ACC = Atlantic Coast Conference, AE = America East, Amer = American, ASun = ASUN, B10 = Big Ten, B12 = Big 12, BE = Big East, BSky = Big Sky, BSth = Big South, BW = Big West, CAA = Colonial Athletic Association, CUSA = Conference USA, Horz = Horizon League, Ivy = Ivy League, MAAC = Metro Atlantic Athletic Conference, MAC = Mid-American Conference, MEAC = Mid-Eastern Athletic Conference, MVC = Missouri Valley Conference, MWC = Mountain West, NEC = Northeast Conference, OVC = Ohio Valley Conference, P12 = Pac-12, Pat = Patriot League, SB = Sun Belt, SC = Southern Conference, SEC = South Eastern Conference, Slnd = Southland Conference, Sum = Summit League, SWAC = Southwestern Athletic Conference, WAC = Western Athletic Conference, WCC = West Coast Conference) G: Number of games played W: Number of games won ADJOE: Adjusted Offensive Efficiency (An estimate of the offensive efficiency (points scored per 100 possessions) a team would have against the average Division I defense) ADJDE: Adjusted Defensive Efficiency (An estimate of the defensive efficiency (points allowed per 100 possessions) a team would have against the average Division I offense) BARTHAG: Power Rating (Chance of beating an average Division I team) EFG_O: Effective Field Goal Percentage Shot EFG_D: Effective Field Goal Percentage Allowed TOR: Turnover Percentage Allowed (Turnover Rate) TORD: Turnover Percentage Committed (Steal Rate) ORB: Offensive Rebound Rate DRB: Offensive Rebound Rate Allowed FTR : Free Throw Rate (How often the given team shoots Free Throws) FTRD: Free Throw Rate Allowed 2P_O: Two-Point Shooting Percentage 2P_D: Two-Point Shooting Percentage Allowed 3P_O: Three-Point Shooting Percentage 3P_D: Three-Point Shooting Percentage Allowed ADJ_T: Adjusted Tempo (An estimate of the tempo (possessions per 40 minutes) a team would have against the team that wants to play at an average Division I tempo) WAB: Wins Above Bubble (The bubble refers to the cut off between making the NCAA March Madness Tournament and not making it) POSTSEASON: Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year) SEED: Seed in the NCAA March Madness Tournament YEAR: Season Kaggle dataset identifier: college-basketball-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session print("Setup complete.") df = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv") df = df.sort_values(by=["TEAM", "YEAR"]) duke_years = df[df["TEAM"] == "Duke"] columns_list = df.columns.tolist() print(columns_list) postseason_mapping = { "Champions": 1, "2ND": 2, "F4": 4, "E8": 8, "S16": 16, "R32": 32, "R64": 64, "R68": 68, } df["POSTSEASON"] = df["POSTSEASON"].replace(postseason_mapping) # Analyzing three point offense and defense on team success... selected_columns = ["TEAM", "W", "POSTSEASON", "3P_O", "3P_D"] data_filtered = df[selected_columns].copy() data_filtered = ( data_filtered.dropna() ) # Includes only teams that made it to the postseason data_filtered.sort_values(by="POSTSEASON", key=lambda x: x.astype(int)) # Create correlation matrix... correlation_matrix = data_filtered[["3P_O", "3P_D", "W", "POSTSEASON"]].corr() print(correlation_matrix) # Clearly, the strongest correlation lies between wins and postseason success. I am surprised to find that 3 point efficiency and defense has relatively low correlation to post season success. I think it's important to note, however, that 3 point defense has a higher correlation to wins and postseason success than 3 point efficiency. # It's a common phrase that "defense wins championships." Let's look at if defense rating has a higher correlation to wins and postseason success than offensive rating. adje_columns = ["TEAM", "YEAR", "W", "POSTSEASON", "ADJOE", "ADJDE"] adje_df = df[adje_columns].copy() adje_df = adje_df.dropna() adje_df.sort_values(by="ADJOE", ascending=False) adje_correlation_matrix = adje_df[["W", "POSTSEASON", "ADJOE", "ADJDE"]].corr() print(adje_correlation_matrix) # Interesting. The correlation between wins and ADJOE and ADJDE was relatively similar which is unsurprising, as those metrics are "adjusted" to show points scored on and getting scored on the average D-1 basketball team. However, I was surprised to learn that ADJOE has a higher correlation to postseason success than ADJDE. effectiveness = adje_df.copy() effectiveness["EFF"] = effectiveness["ADJOE"] - effectiveness["ADJDE"] effectiveness_correlation_matrix = effectiveness[["W", "POSTSEASON", "EFF"]].corr() print(effectiveness_correlation_matrix) # This is a much better metric. But I also wonder if perhaps defense plays more of a roll in winning in certain years rather than others... 
good_postseason_teams = adje_df[adje_df["POSTSEASON"] <= 4] good_postseason_teams = good_postseason_teams.reset_index(drop=True) avg_adjde_by_year = good_postseason_teams.groupby("YEAR")["ADJDE"].mean() avg_adjoe_by_year = good_postseason_teams.groupby("YEAR")["ADJOE"].mean() import matplotlib.pyplot as plt plt.plot(avg_adjde_by_year.index, avg_adjde_by_year.values) plt.xlabel("Year") plt.ylabel("Average ADJDE") plt.title("Average ADJDE of Teams with Postseason Success") plt.show() plt.plot(avg_adjoe_by_year.index, avg_adjoe_by_year.values) plt.xlabel("Year") plt.ylabel("Average ADJOE") plt.title("Average ADJOE of Teams with Postseason Success") plt.show()
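A hedged aside, not part of the notebook above: the question of whether defense matters more in some seasons can also be probed by computing the wins-versus-efficiency correlations year by year. The sketch below assumes only the cbb.csv path and the W, ADJOE, ADJDE and YEAR columns described in the dataset text.

# Sketch: per-season correlation of wins with adjusted offense/defense.
# Assumes the cbb.csv path and the W / ADJOE / ADJDE / YEAR columns described above.
import pandas as pd

df = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv")

rows = []
for year, g in df.groupby("YEAR"):
    rows.append(
        {
            "YEAR": year,
            "corr_W_ADJOE": g["W"].corr(g["ADJOE"]),
            # ADJDE is points allowed per 100 possessions, so a negative value here
            # means better defense goes together with more wins.
            "corr_W_ADJDE": g["W"].corr(g["ADJDE"]),
        }
    )
yearly_corr = pd.DataFrame(rows).set_index("YEAR")
print(yearly_corr)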
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/841/129841525.ipynb
college-basketball-dataset
andrewsundberg
[{"Id": 129841525, "ScriptId": 38556575, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10847921, "CreationDate": "05/16/2023 22:06:28", "VersionNumber": 2.0, "Title": "College Basketball Correlations", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186226085, "KernelVersionId": 129841525, "SourceDatasetVersionId": 2027426}]
[{"Id": 2027426, "DatasetId": 418778, "DatasourceVersionId": 2067134, "CreatorUserId": 2192630, "LicenseName": "CC0: Public Domain", "CreationDate": "03/16/2021 00:56:42", "VersionNumber": 4.0, "Title": "College Basketball Dataset", "Slug": "college-basketball-dataset", "Subtitle": "Datasets for the 2013 through 2021 seasons", "Description": "### Content\nData from the 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, and 2021 Division I college basketball seasons.\n\ncbb.csv has seasons 2013-2019 and seasons 2021-2023 combined\n\nThe 2020 season's data set is kept separate from the other seasons, because there was no postseason due to the Coronavirus.\n\n###Variables\n\nRK (Only in cbb20): The ranking of the team at the end of the regular season according to barttorvik\n\nTEAM: The Division I college basketball school\n\nCONF: The Athletic Conference in which the school participates in (A10 = Atlantic 10, ACC = Atlantic Coast Conference, AE = America East, Amer = American, ASun = ASUN, B10 = Big Ten, B12 = Big 12, BE = Big East, BSky = Big Sky, BSth = Big South, BW = Big West, CAA = Colonial Athletic Association, CUSA = Conference USA, Horz = Horizon League, Ivy = Ivy League, MAAC = Metro Atlantic Athletic Conference, MAC = Mid-American Conference, MEAC = Mid-Eastern Athletic Conference, MVC = Missouri Valley Conference, MWC = Mountain West, NEC = Northeast Conference, OVC = Ohio Valley Conference, P12 = Pac-12, Pat = Patriot League, SB = Sun Belt, SC = Southern Conference, SEC = South Eastern Conference, Slnd = Southland Conference, Sum = Summit League, SWAC = Southwestern Athletic Conference, WAC = Western Athletic Conference, WCC = West Coast Conference)\n\nG: Number of games played\n\nW: Number of games won\n\nADJOE: Adjusted Offensive Efficiency (An estimate of the offensive efficiency (points scored per 100 possessions) a team would have against the average Division I defense)\n\nADJDE: Adjusted Defensive Efficiency (An estimate of the defensive efficiency (points allowed per 100 possessions) a team would have against the average Division I offense)\n\nBARTHAG: Power Rating (Chance of beating an average Division I team)\n\nEFG_O: Effective Field Goal Percentage Shot\n\nEFG_D: Effective Field Goal Percentage Allowed\n\nTOR: Turnover Percentage Allowed (Turnover Rate)\n\nTORD: Turnover Percentage Committed (Steal Rate)\n\nORB: Offensive Rebound Rate\n\nDRB: Offensive Rebound Rate Allowed\n\nFTR\t: Free Throw Rate (How often the given team shoots Free Throws)\n\nFTRD: Free Throw Rate Allowed\n\n2P_O: Two-Point Shooting Percentage\n\n2P_D: Two-Point Shooting Percentage Allowed\n\n3P_O: Three-Point Shooting Percentage\n\n3P_D: Three-Point Shooting Percentage Allowed\n\nADJ_T: Adjusted Tempo (An estimate of the tempo (possessions per 40 minutes) a team would have against the team that wants to play at an average Division I tempo)\n\nWAB: Wins Above Bubble (The bubble refers to the cut off between making the NCAA March Madness Tournament and not making it)\t\n\nPOSTSEASON: Round where the given team was eliminated or where their season ended (R68 = First Four, R64 = Round of 64, R32 = Round of 32, S16 = Sweet Sixteen, E8 = Elite Eight, F4 = Final Four, 2ND = Runner-up, Champion = Winner of the NCAA March Madness Tournament for that given year)\n\nSEED: Seed in the NCAA March Madness Tournament\n\nYEAR: Season\n\n\n### Acknowledgements\n\nThis data was scraped from from http://barttorvik.com/trank.php#. 
I cleaned the data set and added the POSTSEASON, SEED, and YEAR columns", "VersionNotes": "Added 2013 and 2014 as well as 2021 data from the start of the tournament", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 418778, "CreatorUserId": 2192630, "OwnerUserId": 2192630.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6097216.0, "CurrentDatasourceVersionId": 6175761.0, "ForumId": 431212, "Type": 2, "CreationDate": "11/18/2019 02:54:22", "LastActivityDate": "11/18/2019", "TotalViews": 139857, "TotalDownloads": 18994, "TotalVotes": 285, "TotalKernels": 33}]
[{"Id": 2192630, "UserName": "andrewsundberg", "DisplayName": "Andrew Sundberg", "RegisterDate": "08/29/2018", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session print("Setup complete.") df = pd.read_csv("/kaggle/input/college-basketball-dataset/cbb.csv") df = df.sort_values(by=["TEAM", "YEAR"]) duke_years = df[df["TEAM"] == "Duke"] columns_list = df.columns.tolist() print(columns_list) postseason_mapping = { "Champions": 1, "2ND": 2, "F4": 4, "E8": 8, "S16": 16, "R32": 32, "R64": 64, "R68": 68, } df["POSTSEASON"] = df["POSTSEASON"].replace(postseason_mapping) # Analyzing three point offense and defense on team success... selected_columns = ["TEAM", "W", "POSTSEASON", "3P_O", "3P_D"] data_filtered = df[selected_columns].copy() data_filtered = ( data_filtered.dropna() ) # Includes only teams that made it to the postseason data_filtered.sort_values(by="POSTSEASON", key=lambda x: x.astype(int)) # Create correlation matrix... correlation_matrix = data_filtered[["3P_O", "3P_D", "W", "POSTSEASON"]].corr() print(correlation_matrix) # Clearly, the strongest correlation lies between wins and postseason success. I am surprised to find that 3 point efficiency and defense has relatively low correlation to post season success. I think it's important to note, however, that 3 point defense has a higher correlation to wins and postseason success than 3 point efficiency. # It's a common phrase that "defense wins championships." Let's look at if defense rating has a higher correlation to wins and postseason success than offensive rating. adje_columns = ["TEAM", "YEAR", "W", "POSTSEASON", "ADJOE", "ADJDE"] adje_df = df[adje_columns].copy() adje_df = adje_df.dropna() adje_df.sort_values(by="ADJOE", ascending=False) adje_correlation_matrix = adje_df[["W", "POSTSEASON", "ADJOE", "ADJDE"]].corr() print(adje_correlation_matrix) # Interesting. The correlation between wins and ADJOE and ADJDE was relatively similar which is unsurprising, as those metrics are "adjusted" to show points scored on and getting scored on the average D-1 basketball team. However, I was surprised to learn that ADJOE has a higher correlation to postseason success than ADJDE. effectiveness = adje_df.copy() effectiveness["EFF"] = effectiveness["ADJOE"] - effectiveness["ADJDE"] effectiveness_correlation_matrix = effectiveness[["W", "POSTSEASON", "EFF"]].corr() print(effectiveness_correlation_matrix) # This is a much better metric. But I also wonder if perhaps defense plays more of a roll in winning in certain years rather than others... 
good_postseason_teams = adje_df[adje_df["POSTSEASON"] <= 4] good_postseason_teams = good_postseason_teams.reset_index(drop=True) avg_adjde_by_year = good_postseason_teams.groupby("YEAR")["ADJDE"].mean() avg_adjoe_by_year = good_postseason_teams.groupby("YEAR")["ADJOE"].mean() import matplotlib.pyplot as plt plt.plot(avg_adjde_by_year.index, avg_adjde_by_year.values) plt.xlabel("Year") plt.ylabel("Average ADJDE") plt.title("Average ADJDE of Teams with Postseason Success") plt.show() plt.plot(avg_adjoe_by_year.index, avg_adjoe_by_year.values) plt.xlabel("Year") plt.ylabel("Average ADJOE") plt.title("Average ADJOE of Teams with Postseason Success") plt.show()
false
1
1,168
0
2,163
1,168
129151809
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from nltk.tokenize import RegexpTokenizer def tokenizer(txt): token = RegexpTokenizer(r"\w+") return " ".join(list(token.tokenize(txt))) # stopwords from nltk.corpus import stopwords st_words = stopwords.words("english") def remove_stopwords(lst): res = [] lst = lst.split(" ") for word in lst: if word.lower() not in st_words: res.append(word.lower()) return " ".join(res) # Stemming from nltk.stem import WordNetLemmatizer Lemmatizer = WordNetLemmatizer() def lemmatize_words(lst): res = [] for word in lst.split(" "): res.append(Lemmatizer.lemmatize(word)) return " ".join(res) # list to str def convert_tostr(lst): return " ".join(lst) def convert_tolst(s): return [s] import pandas as pd import numpy as np b = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_7.json") b["_data"] = b["Text"].apply(tokenizer) b["_data"] = b["_data"].apply(remove_stopwords) b["_data"] = b["_data"].apply(lemmatize_words) from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import AgglomerativeClustering from sklearn.metrics.pairwise import cosine_similarity vectorizer = TfidfVectorizer() articles = list(b["_data"]) tfidf_matrix = vectorizer.fit_transform(articles) for i in tfidf_matrix: print(i) cosine_sim = cosine_similarity(tfidf_matrix) cosine_sim cluster = AgglomerativeClustering( n_clusters=None, distance_threshold=0.5, linkage="single" ) cluster_labels = cluster.fit_predict(cosine_sim) b["cluster_label_tf_af"] = cluster_labels n = cluster_labels.max() for i in range(0, n + 1): dt = b.query("cluster_label_tf_af==" + str(i)) if dt.shape[0] == 1: b["cluster_label_tf_af"] = b["cluster_label_tf_af"].replace( [dt["cluster_label_tf_af"].values[0]], -1 ) dic = {} for i in b["cluster_label_tf_af"]: if dic.get(i) != None: dic[i] = dic[i] + 1 else: dic[i] = 1 print("total_clusters: ", max(dic.keys())) for i, j in dic.items(): print(f"cluster :{i} , No of Articles :{j}") b.to_csv("May_7.csv") dt = b.query( "cluster_label_tf_af==0" ) # defining cluster id and id must be >=40 and <=2480 for i, r in dt.iterrows(): print("** Article id---", r["_id"]) print("\n") # print('hashlink---',r['hashlink']) # print('\n') print("source---", r["_source"]["source"]) print("\n") print("Content -----", r["Text"]) print( "----------------------------------------------------------------------------------" ) print("\n")
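An illustrative aside rather than part of the notebook above: AgglomerativeClustering is fed the cosine-similarity matrix as if it were a feature matrix. A common alternative is to convert similarity to distance and cluster on it as a precomputed matrix. The snippet below is a self-contained sketch of that idea; the `docs` list is a hypothetical stand-in for the cleaned articles, and the 0.5 threshold simply mirrors the notebook's setting.

# Sketch: hierarchical clustering on a precomputed cosine-distance matrix.
# `docs` is a hypothetical stand-in for the cleaned article texts used above.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import AgglomerativeClustering

docs = ["first cleaned article", "second cleaned article text", "an unrelated document"]

tfidf = TfidfVectorizer().fit_transform(docs)
distance = 1.0 - cosine_similarity(tfidf)   # cosine distance: 0 means identical documents
np.fill_diagonal(distance, 0.0)             # guard against tiny round-off on the diagonal

clusterer = AgglomerativeClustering(
    n_clusters=None,
    distance_threshold=0.5,   # merge documents closer than 0.5 in cosine distance
    linkage="single",         # "ward" would not accept a precomputed matrix
    metric="precomputed",     # older scikit-learn releases name this argument `affinity`
)
labels = clusterer.fit_predict(distance)
print(labels)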
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/151/129151809.ipynb
null
null
[{"Id": 129151809, "ScriptId": 38390627, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14218315, "CreationDate": "05/11/2023 11:29:38", "VersionNumber": 1.0, "Title": "AgglomerativeClustering", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 129.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from nltk.tokenize import RegexpTokenizer def tokenizer(txt): token = RegexpTokenizer(r"\w+") return " ".join(list(token.tokenize(txt))) # stopwords from nltk.corpus import stopwords st_words = stopwords.words("english") def remove_stopwords(lst): res = [] lst = lst.split(" ") for word in lst: if word.lower() not in st_words: res.append(word.lower()) return " ".join(res) # Stemming from nltk.stem import WordNetLemmatizer Lemmatizer = WordNetLemmatizer() def lemmatize_words(lst): res = [] for word in lst.split(" "): res.append(Lemmatizer.lemmatize(word)) return " ".join(res) # list to str def convert_tostr(lst): return " ".join(lst) def convert_tolst(s): return [s] import pandas as pd import numpy as np b = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_7.json") b["_data"] = b["Text"].apply(tokenizer) b["_data"] = b["_data"].apply(remove_stopwords) b["_data"] = b["_data"].apply(lemmatize_words) from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import AgglomerativeClustering from sklearn.metrics.pairwise import cosine_similarity vectorizer = TfidfVectorizer() articles = list(b["_data"]) tfidf_matrix = vectorizer.fit_transform(articles) for i in tfidf_matrix: print(i) cosine_sim = cosine_similarity(tfidf_matrix) cosine_sim cluster = AgglomerativeClustering( n_clusters=None, distance_threshold=0.5, linkage="single" ) cluster_labels = cluster.fit_predict(cosine_sim) b["cluster_label_tf_af"] = cluster_labels n = cluster_labels.max() for i in range(0, n + 1): dt = b.query("cluster_label_tf_af==" + str(i)) if dt.shape[0] == 1: b["cluster_label_tf_af"] = b["cluster_label_tf_af"].replace( [dt["cluster_label_tf_af"].values[0]], -1 ) dic = {} for i in b["cluster_label_tf_af"]: if dic.get(i) != None: dic[i] = dic[i] + 1 else: dic[i] = 1 print("total_clusters: ", max(dic.keys())) for i, j in dic.items(): print(f"cluster :{i} , No of Articles :{j}") b.to_csv("May_7.csv") dt = b.query( "cluster_label_tf_af==0" ) # defining cluster id and id must be >=40 and <=2480 for i, r in dt.iterrows(): print("** Article id---", r["_id"]) print("\n") # print('hashlink---',r['hashlink']) # print('\n') print("source---", r["_source"]["source"]) print("\n") print("Content -----", r["Text"]) print( "----------------------------------------------------------------------------------" ) print("\n")
false
0
1,039
0
1,039
1,039
129126559
<jupyter_start><jupyter_text>Marijuana Arrests in Toronto: Racial Disparities ``` Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables. ``` | Column | Description | | --- | --- | | released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes. | | colour | The arrestee's race; a factor with levels: Black; White. | | year | 1997 through 2002; a numeric vector. | | age | in years; a numeric vector. | | sex | a factor with levels: Female; Male. | | employed | a factor with levels: No; Yes. | | citizen | a factor with levels: No; Yes. | | checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. – 6 in all) on which the arrestee's name appeared; a numeric vector | # Source Personal communication from Michael Friendly, York University. Kaggle dataset identifier: arrests-for-marijuana-possession <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Libraries and Load Dataset import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.metrics import mean_squared_error from sklearn.preprocessing import LabelBinarizer # load dataset data = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv") data from sklearn.model_selection import RandomizedSearchCV, GridSearchCV from sklearn.metrics import accuracy_score import seaborn as sns import matplotlib.pyplot as plt corr_matrix = data.corr() fig, ax = plt.subplots(figsize=(10, 10)) sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax) plt.show() data.describe() # # Looking for Null values num_nan = data.isna().sum() print(num_nan) data.columns data = data.drop("Unnamed: 0", axis=1) data # # Creating Dummy Variables and Applying Binary Labeling lb = LabelBinarizer() data["released_binary"] = lb.fit_transform(data["released"]) data["colour_binary"] = lb.fit_transform(data["colour"]) data["sex_binary"] = lb.fit_transform(data["sex"]) data["employed_binary"] = lb.fit_transform(data["employed"]) data["citizen_binary"] = lb.fit_transform(data["citizen"]) data # # Getting rid of Categorical Features data = data.drop(["released", "colour", "sex", "employed", "citizen"], axis=1) data corr_matrix = data.corr() fig, ax = plt.subplots(figsize=(10, 10)) sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax) plt.show() # # Applying Standard Scaling scaler = StandardScaler() scaler.fit(data[["age", "year", "checks"]]) data[["age", "year", "checks"]] = scaler.transform(data[["age", "year", "checks"]]) data # # 
Splitting the dataset into Training and Test sets X_train, X_test, y_train, y_test = train_test_split( data.drop("released_binary", axis=1), data["released_binary"], test_size=0.2, random_state=42, ) X_train y_train X_test y_test # # Training the dataset using Logistic Regression Classifier logreg = LogisticRegression() logreg.fit(X_train, y_train) # # Testing on dataset using Logistic Regression Classifier y_pred = logreg.predict(X_test) y_pred accuracy = logreg.score(X_test, y_test) print(accuracy) # # Importing Random Forest Classifier from sklearn.ensemble import RandomForestClassifier from scipy.stats import randint # # Creating a Grid Search to find best hyperparameters for the RF classifier # Define the range of hyperparameters to tune param_grid = { "n_estimators": [50, 100, 150, 200], "max_depth": [2, 5, 10, 20], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } rf = RandomForestClassifier(random_state=42) grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5) grid_search.fit(X_train, y_train) print("Best hyperparameters: ", grid_search.best_params_) # # Training the dataset using RF Classifier and best hyperparameters rf = RandomForestClassifier( random_state=42, max_depth=5, min_samples_leaf=1, n_estimators=200 ) rf.fit(X_train, y_train) # # Predicting results using RF Classifier y_pred = rf.predict(X_test) y_pred print(classification_report(y_test, y_pred)) # # Importing Decision Tree Classifier from sklearn.tree import DecisionTreeClassifier # # Creating a Grid Search to find best hyperparameters for the Decision Tree classifier param_grid = { "max_depth": [2, 4, 6, 8, 10], "min_samples_split": [2, 4, 6, 8, 10], "min_samples_leaf": [1, 2, 3, 4, 5], "max_features": ["sqrt", "log2", None], "criterion": ["gini", "entropy"], } clf = DecisionTreeClassifier() grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5, n_jobs=-1) grid_search.fit(X_train, y_train) # # Finding best hyperparameters for Decision Tree print("Best Hyperparameters:", grid_search.best_params_) # # Training the Decision Tree Model using best hyperparameters best_clf = grid_search.best_estimator_ y_pred = best_clf.predict(X_test) # # Predicting results using Decision Tree Classifier accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing Gradient Boosting Classifier from sklearn.ensemble import GradientBoostingClassifier # # Creating a Grid Search to find best hyperparameters for the GB classifier param_grid = { "n_estimators": [50, 100, 150], "max_depth": [3, 5, 7], "learning_rate": [0.05, 0.1, 0.2], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], "max_features": [None, "sqrt", "log2"], } clf = GradientBoostingClassifier(random_state=42) grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5) grid_search.fit(X_train, y_train) # # Finding best hyperparameters for GB Classifier print("Best Hyperparameters: ", grid_search.best_params_) print("Best Accuracy Score: {:.2f}%".format(grid_search.best_score_ * 100)) # # Training the GB Classifier using best hyperparameters best_clf = grid_search.best_estimator_ y_pred = best_clf.predict(X_test) # # Predicting results using GB Classifier accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing SVM and Training on dataset from sklearn import svm clf = svm.SVC(kernel="poly", C=100) clf.fit(X_train, y_train) # # Predicting results using SVM y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing Naive Bayes and Training on the Dataset 
from sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf.fit(X_train, y_train) # # Predicting results using Naive Bayes y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing KNN Classifier from sklearn.neighbors import KNeighborsClassifier # # Finding the best value for Number of Neighbours for KNN n_neighbors_list = range(1, 21) accuracy_list = [] for n in n_neighbors_list: clf = KNeighborsClassifier(n_neighbors=n) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) accuracy_list.append(accuracy) # Plot the results of the elbow method plt.plot(n_neighbors_list, accuracy_list) plt.title("Elbow Curve for KNN") plt.xlabel("Number of Neighbors") plt.ylabel("Accuracy") plt.show() # # Training the Dataset with best Hyperparameters of KNN clf = KNeighborsClassifier(n_neighbors=20) clf.fit(X_train, y_train) # # Predicting the results using KNN y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # And, Finally importing ANN from keras.models import Sequential from keras.layers import Dense model = Sequential() model.add(Dense(64, input_dim=X_train.shape[1], activation="relu")) model.add(Dense(64, activation="relu")) model.add(Dense(1, activation="sigmoid")) # # Model Compilation model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2) # # ANN Model Prediction loss, accuracy = model.evaluate(X_test, y_test) print("Accuracy: {:.2f}%".format(accuracy * 100))
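One hedged observation on the cells above: `classification_report` is called in the random-forest section but never imported, so that call needs `from sklearn.metrics import classification_report` to run. The sketch below shows that import and a compact way to compare a few of the models on the same split; it reuses the X_train/X_test/y_train/y_test variables defined earlier and is an illustration, not the notebook's own code.

# Sketch: consistent evaluation loop over a few of the classifiers used above.
# Relies on the X_train / X_test / y_train / y_test split created earlier in the notebook.
from sklearn.metrics import accuracy_score, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB

models = {
    "logistic_regression": LogisticRegression(max_iter=1000),
    "random_forest": RandomForestClassifier(random_state=42),
    "naive_bayes": GaussianNB(),
}

for name, model in models.items():
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(name, "accuracy:", accuracy_score(y_test, y_pred))
    print(classification_report(y_test, y_pred))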
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/126/129126559.ipynb
arrests-for-marijuana-possession
utkarshx27
[{"Id": 129126559, "ScriptId": 38320485, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7719373, "CreationDate": "05/11/2023 07:44:01", "VersionNumber": 2.0, "Title": "Classification of Criminals Rereleased or Not", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 294.0, "LinesInsertedFromPrevious": 96.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 198.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 184903786, "KernelVersionId": 129126559, "SourceDatasetVersionId": 5631796}]
[{"Id": 5631796, "DatasetId": 3238325, "DatasourceVersionId": 5707058, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 10:17:21", "VersionNumber": 1.0, "Title": "Marijuana Arrests in Toronto: Racial Disparities", "Slug": "arrests-for-marijuana-possession", "Subtitle": "Marijuana Arrests in Toronto: Race, Release, and Policing (1997-2002)", "Description": "``` \nData on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.\n```\n| Column | Description |\n| --- | --- |\n| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.\n |\n| colour | The arrestee's race; a factor with levels: Black; White. |\n| year | 1997 through 2002; a numeric vector. |\n| age | in years; a numeric vector. |\n| sex | a factor with levels: Female; Male. |\n| employed | a factor with levels: No; Yes. |\n| citizen | a factor with levels: No; Yes. |\n| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. \u2013 6 in all) on which the arrestee's name appeared; a numeric vector |\n\n# Source\nPersonal communication from Michael Friendly, York University.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3238325, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5631796.0, "CurrentDatasourceVersionId": 5707058.0, "ForumId": 3303517, "Type": 2, "CreationDate": "05/08/2023 10:17:21", "LastActivityDate": "05/08/2023", "TotalViews": 8788, "TotalDownloads": 1614, "TotalVotes": 49, "TotalKernels": 14}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Libraries and Load Dataset import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.metrics import mean_squared_error from sklearn.preprocessing import LabelBinarizer # load dataset data = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv") data from sklearn.model_selection import RandomizedSearchCV, GridSearchCV from sklearn.metrics import accuracy_score import seaborn as sns import matplotlib.pyplot as plt corr_matrix = data.corr() fig, ax = plt.subplots(figsize=(10, 10)) sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax) plt.show() data.describe() # # Looking for Null values num_nan = data.isna().sum() print(num_nan) data.columns data = data.drop("Unnamed: 0", axis=1) data # # Creating Dummy Variables and Applying Binary Labeling lb = LabelBinarizer() data["released_binary"] = lb.fit_transform(data["released"]) data["colour_binary"] = lb.fit_transform(data["colour"]) data["sex_binary"] = lb.fit_transform(data["sex"]) data["employed_binary"] = lb.fit_transform(data["employed"]) data["citizen_binary"] = lb.fit_transform(data["citizen"]) data # # Getting rid of Categorical Features data = data.drop(["released", "colour", "sex", "employed", "citizen"], axis=1) data corr_matrix = data.corr() fig, ax = plt.subplots(figsize=(10, 10)) sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", square=True, ax=ax) plt.show() # # Applying Standard Scaling scaler = StandardScaler() scaler.fit(data[["age", "year", "checks"]]) data[["age", "year", "checks"]] = scaler.transform(data[["age", "year", "checks"]]) data # # Splitting the dataset into Training and Test sets X_train, X_test, y_train, y_test = train_test_split( data.drop("released_binary", axis=1), data["released_binary"], test_size=0.2, random_state=42, ) X_train y_train X_test y_test # # Training the dataset using Logistic Regression Classifier logreg = LogisticRegression() logreg.fit(X_train, y_train) # # Testing on dataset using Logistic Regression Classifier y_pred = logreg.predict(X_test) y_pred accuracy = logreg.score(X_test, y_test) print(accuracy) # # Importing Random Forest Classifier from sklearn.ensemble import RandomForestClassifier from scipy.stats import randint # # Creating a Grid Search to find best hyperparameters for the RF classifier # Define the range of hyperparameters to tune param_grid = { "n_estimators": [50, 100, 150, 200], "max_depth": [2, 5, 10, 20], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } rf = RandomForestClassifier(random_state=42) grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5) grid_search.fit(X_train, y_train) print("Best hyperparameters: ", grid_search.best_params_) # # Training the dataset 
using RF Classifier and best hyperparameters rf = RandomForestClassifier( random_state=42, max_depth=5, min_samples_leaf=1, n_estimators=200 ) rf.fit(X_train, y_train) # # Predicting results using RF Classifier y_pred = rf.predict(X_test) y_pred print(classification_report(y_test, y_pred)) # # Importing Decision Tree Classifier from sklearn.tree import DecisionTreeClassifier # # Creating a Grid Search to find best hyperparameters for the Decision Tree classifier param_grid = { "max_depth": [2, 4, 6, 8, 10], "min_samples_split": [2, 4, 6, 8, 10], "min_samples_leaf": [1, 2, 3, 4, 5], "max_features": ["sqrt", "log2", None], "criterion": ["gini", "entropy"], } clf = DecisionTreeClassifier() grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5, n_jobs=-1) grid_search.fit(X_train, y_train) # # Finding best hyperparameters for Decision Tree print("Best Hyperparameters:", grid_search.best_params_) # # Training the Decision Tree Model using best hyperparameters best_clf = grid_search.best_estimator_ y_pred = best_clf.predict(X_test) # # Predicting results using Decision Tree Classifier accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing Gradient Boosting Classifier from sklearn.ensemble import GradientBoostingClassifier # # Creating a Grid Search to find best hyperparameters for the GB classifier param_grid = { "n_estimators": [50, 100, 150], "max_depth": [3, 5, 7], "learning_rate": [0.05, 0.1, 0.2], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], "max_features": [None, "sqrt", "log2"], } clf = GradientBoostingClassifier(random_state=42) grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5) grid_search.fit(X_train, y_train) # # Finding best hyperparameters for GB Classifier print("Best Hyperparameters: ", grid_search.best_params_) print("Best Accuracy Score: {:.2f}%".format(grid_search.best_score_ * 100)) # # Training the GB Classifier using best hyperparameters best_clf = grid_search.best_estimator_ y_pred = best_clf.predict(X_test) # # Predicting results using GB Classifier accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing SVM and Training on dataset from sklearn import svm clf = svm.SVC(kernel="poly", C=100) clf.fit(X_train, y_train) # # Predicting results using SVM y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing Naive Bayes and Training on the Dataset from sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf.fit(X_train, y_train) # # Predicting results using Naive Bayes y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # Importing KNN Classifier from sklearn.neighbors import KNeighborsClassifier # # Finding the best value for Number of Neighbours for KNN n_neighbors_list = range(1, 21) accuracy_list = [] for n in n_neighbors_list: clf = KNeighborsClassifier(n_neighbors=n) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) accuracy_list.append(accuracy) # Plot the results of the elbow method plt.plot(n_neighbors_list, accuracy_list) plt.title("Elbow Curve for KNN") plt.xlabel("Number of Neighbors") plt.ylabel("Accuracy") plt.show() # # Training the Dataset with best Hyperparameters of KNN clf = KNeighborsClassifier(n_neighbors=20) clf.fit(X_train, y_train) # # Predicting the results using KNN y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print(accuracy) # # And, Finally importing ANN from keras.models import Sequential from keras.layers import 
Dense model = Sequential() model.add(Dense(64, input_dim=X_train.shape[1], activation="relu")) model.add(Dense(64, activation="relu")) model.add(Dense(1, activation="sigmoid")) # # Model Compilation model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.2) # # ANN Model Prediction loss, accuracy = model.evaluate(X_test, y_test) print("Accuracy: {:.2f}%".format(accuracy * 100))
false
1
2,492
3
2,816
2,492
129260220
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib as plt import seaborn as sns df = pd.read_csv("/kaggle/input/indian-food-dataset/data.csv") df.head() # **Summary statistics: # **** # Compute summary statistics for Preparation Time column df["Preparation Time"].describe() # Compute summary statistics for Cooking Time column df["Cooking Time"].describe() # Compute summary statistics for Total Time column df["Total Time"].describe() # Compute summary statistics for Protein per Serving column df["Protein per Serving"].describe() # Compute summary statistics for Calories per Serving column df["Calories per Serving"].describe() # **Frequency distribution: # ** # Calculate frequency distribution for Dish Name column df["Dish Name"].value_counts() # Calculate frequency distribution for Region column df["Region"].value_counts() # Calculate frequency distribution for Spiciness column df["Spiciness"].value_counts() # Calculate frequency distribution for Serves column df["Serves"].value_counts() # **Histograms** # Plot histogram for Preparation Time column import matplotlib.pyplot as plt plt.hist(df["Preparation Time"]) # Plot histogram for Cooking Time column plt.hist(df["Cooking Time"], color="red") # Plot histogram for Total Time column plt.hist(df["Total Time"], color="orange") # Plot histogram for Protein per Serving column plt.hist(df["Protein per Serving"], color="green") # Plot histogram for Calories per Serving column plt.hist(df["Calories per Serving"], color="pink") df.head() # **Pair plot** sns.pairplot(df) # Create a bar plot of the number of dishes by region plt.bar(df["Region"].unique(), df["Region"].value_counts()) # Create a stacked bar plot of the number of dishes by region and spiciness df.groupby(["Region", "Spiciness"]).size().unstack().plot(kind="bar", stacked=True) # Create a pie chart of the proportion of dishes by region plt.pie(df["Region"].value_counts(), labels=df["Region"].unique()) # Create a pie chart of the proportion of dishes by spiciness plt.pie(df["Spiciness"].value_counts(), labels=df["Spiciness"].unique()) # Create a scatter plot of Preparation Time vs Calories per Serving plt.scatter(df["Preparation Time"], df["Calories per Serving"]) # Create a scatter plot of Cooking Time vs Protein per Serving plt.scatter(df["Cooking Time"], df["Protein per Serving"])
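A cautionary sketch, separate from the notebook above: `df["Region"].unique()` and `df["Region"].value_counts()` do not necessarily list the categories in the same order, so pairing them in the bar and pie calls can attach counts to the wrong labels. Plotting straight from `value_counts()` keeps labels and counts aligned. Only the data.csv path and the "Region" column from the notebook are assumed.

# Sketch: label-safe bar and pie charts built directly from value_counts().
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("/kaggle/input/indian-food-dataset/data.csv")

region_counts = df["Region"].value_counts()

region_counts.plot(kind="bar")          # bar labels come from the counts' own index
plt.ylabel("Number of dishes")
plt.show()

plt.pie(region_counts.values, labels=region_counts.index, autopct="%1.0f%%")
plt.show()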
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/260/129260220.ipynb
null
null
[{"Id": 129260220, "ScriptId": 38306133, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14927546, "CreationDate": "05/12/2023 08:55:47", "VersionNumber": 2.0, "Title": "Indian Food Dataset Notebook", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 113.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 27.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib as plt import seaborn as sns df = pd.read_csv("/kaggle/input/indian-food-dataset/data.csv") df.head() # **Summary statistics: # **** # Compute summary statistics for Preparation Time column df["Preparation Time"].describe() # Compute summary statistics for Cooking Time column df["Cooking Time"].describe() # Compute summary statistics for Total Time column df["Total Time"].describe() # Compute summary statistics for Protein per Serving column df["Protein per Serving"].describe() # Compute summary statistics for Calories per Serving column df["Calories per Serving"].describe() # **Frequency distribution: # ** # Calculate frequency distribution for Dish Name column df["Dish Name"].value_counts() # Calculate frequency distribution for Region column df["Region"].value_counts() # Calculate frequency distribution for Spiciness column df["Spiciness"].value_counts() # Calculate frequency distribution for Serves column df["Serves"].value_counts() # **Histograms** # Plot histogram for Preparation Time column import matplotlib.pyplot as plt plt.hist(df["Preparation Time"]) # Plot histogram for Cooking Time column plt.hist(df["Cooking Time"], color="red") # Plot histogram for Total Time column plt.hist(df["Total Time"], color="orange") # Plot histogram for Protein per Serving column plt.hist(df["Protein per Serving"], color="green") # Plot histogram for Calories per Serving column plt.hist(df["Calories per Serving"], color="pink") df.head() # **Pair plot** sns.pairplot(df) # Create a bar plot of the number of dishes by region plt.bar(df["Region"].unique(), df["Region"].value_counts()) # Create a stacked bar plot of the number of dishes by region and spiciness df.groupby(["Region", "Spiciness"]).size().unstack().plot(kind="bar", stacked=True) # Create a pie chart of the proportion of dishes by region plt.pie(df["Region"].value_counts(), labels=df["Region"].unique()) # Create a pie chart of the proportion of dishes by spiciness plt.pie(df["Spiciness"].value_counts(), labels=df["Spiciness"].unique()) # Create a scatter plot of Preparation Time vs Calories per Serving plt.scatter(df["Preparation Time"], df["Calories per Serving"]) # Create a scatter plot of Cooking Time vs Protein per Serving plt.scatter(df["Cooking Time"], df["Protein per Serving"])
false
0
811
0
811
811
129260808
# # [Attention] はじめに # **This notebook is simple-baseline for ICR Identifying Age-Related Conditions competition.** # **You can refer and copy this notebook freely, but this will need a lot of improvement(e.g., use Greeks, feature-engineering, and more).** # **If you referred or copied this notebook, please vote for this.** # **Have fun!** # **このノートブックはシンプルなベースラインです。** # **参照や複製は自由ですが、多くの改善を必要とするでしょう(Greeksの活用や特徴量エンジニアリングなど)。** # **もし参照や複製をされた場合は、このノートブックにvoteをお願いします。** # **楽しんでいきましょう!** # import libraries # ライブラリのインポート import gc import os import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.metrics import log_loss from catboost import CatBoostClassifier, Pool import warnings warnings.simplefilter("ignore") print("imported.") # read train-data CSV # 訓練データCSVの読込 df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") df_train # **Number of records are 617, it is very light data.** # **target is "Class", and there are 56 features except for "Id".** # **It is better to refer to the Greeks as they are deeply related to the target.** # **レコード件数は617件で、かなり軽量のデータです。** # **目的変数は"Class"で、"Id"以外に56個の特徴量があります。** # **ターゲットに深く関係していると思われるので、Greeksを参照したほうがよさそうです。** # information of train-data # 訓練データの基本情報 df_train.info() # **Almost features are float type except for "Id" and "Class", but "EJ" is only categorical feature.** # **"Id"と"Class"を除いてほぼfloat型の特徴量ですが、"EJ"だけがカテゴリー変数です。** # features which include Null isnull = {} for col in df_train.columns: k = df_train[col].isnull().sum() if k > 0: isnull[col] = k isnull # statistical information of train-data # 訓練データの基本統計量 df_train.describe() # Visualization of "Class" # "Class"の可視化 print(df_train["Class"].value_counts()) sns.countplot(data=df_train, x="Class") plt.grid() # **The ratio of "0" to "1" is approximately 5 to 1.** # **0と1の割合はおよそ5対1です。** # Visualization of "EJ" # "EJ"の可視化 print(df_train["EJ"].value_counts()) sns.countplot(data=df_train, x="EJ") plt.grid() # **The ratio of "A" to "B" is approximately 1 to 2.** # **AとBの割合はおよそ1対2です。** # simple histgram of train-data # ヒストグラム表示 bins = 20 # bins = int(math.log2(len(df_train)) + 1) df_hist = df_train.drop(columns=["Id", "Class"]) fig, axs = plt.subplots(8, 7, figsize=(16, 28)) cnt = 0 for row in range(8): for col in range(7): axs[row, col].hist(df_hist.iloc[:, cnt], bins=bins) axs[row, col].set_title(df_hist.columns[cnt]) cnt += 1 plt.show() # **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.** # **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。** # correlation between features in train-data # 特徴量間の相関関係の図示 plt.figure(figsize=(14, 12)) colormap = plt.cm.RdBu sns.heatmap( df_train.corr(), linewidths=0.1, vmax=1.0, square=True, cmap=colormap, linecolor="white", annot=False, ) # **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.** # **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。** # read greeks-data CSV # greeksデータCSVの読込 df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") df_greeks # information of greeks-data df_greeks.info() # detail of "Alpha" # "Alpha"の詳細 df_greeks["Alpha"].value_counts() # **"A" is class-0, and "B"/"G"/"D" are class-1. 
Numbers of "A" and "B"+"G"+"D" are the same as the numbers of 0 and 1 in "Class".** # **"A"はクラス0で"B"と"G"と"D"はクラス1です。それぞれ、"A"の数と"Class"=0、"B"+"G"+"D"の数と"Class"=1の数は一致します。** # detail of "Beta" df_greeks["Beta"].value_counts() # detail of "Gamma" df_greeks["Gamma"].value_counts() # detail of "Epsilon" df_greeks["Epsilon"].value_counts() # read test-data CSV # テストデータCSVの読込 df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") df_test # **Test data has only 5 records, simple.** # **テストデータは5件だけです。** # Information of test-data # テストデータの基本情報 df_test.info() # statistical information of test-data # テストデータの基本統計量 df_test.describe() # set x/y dataset for train # 訓練用のx/yデータセットの準備 x_train = df_train.drop(columns=["Id", "Class"]) x_train["EJ"] = x_train["EJ"].astype("category") y_train = df_train[["Class"]] print(x_train.shape, y_train.shape) x_train # fitting by CatBoost with Stratified K-Fold cross-validation (splits=3) # CatBoostによる訓練(3分割でのStratified K-Foldによるクロスバリデーション) # parameter params = { "loss_function": "Logloss", "n_estimators": 2000, # "learning_rate": 0.03, "random_state": 45, # "l2_leaf_reg": 3.0, # "bootstrap_type": "Bayesian", # "bagging_temperature": 1.0, # "subsample": 1.0, # "random_strength": 1.0, # "depth": 6, # "grow_policy": "SymmetricTree", "Deptwise", "Lossguide", # "grow_policy": "Lossguide", # "max_leaves": 31, # "od_type": "Iter", # "od_wait": 20, # "border_count": 254, } n_splits = 3 cv = list( StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=45).split( x_train, y_train ) ) metrics = [] imp = pd.DataFrame() for nfold in np.arange(n_splits): print("-" * 30, "fold:", nfold, "-" * 30) idx_tr, idx_va = cv[nfold][0], cv[nfold][1] x_tr, y_tr = x_train.loc[idx_tr, :], y_train.loc[idx_tr, :] x_va, y_va = x_train.loc[idx_va, :], y_train.loc[idx_va, :] print("x/y train-data shapes:", x_tr.shape, y_tr.shape) print("x/y valid-data shapes:", x_va.shape, y_va.shape) # fitting model = CatBoostClassifier(**params) model.fit( x_tr, y_tr, cat_features=["EJ"], eval_set=[(x_tr, y_tr), (x_va, y_va)], verbose=100, early_stopping_rounds=300, ) # prediction y_tr_pred = model.predict_proba(x_tr) y_va_pred = model.predict_proba(x_va) # set metrics(LogLoss) metric_tr = log_loss(y_tr, y_tr_pred) metric_va = log_loss(y_va, y_va_pred) metrics.append([nfold, metric_tr, metric_va]) # importance of features _imp = pd.DataFrame( { "features": x_train.columns, "importance": model.feature_importances_, "nfold": nfold, } ) imp = pd.concat([imp, _imp], axis=0, ignore_index=True) print("-" * 30, "result (LogLoss)", "-" * 30) metrics = np.array(metrics) print( "train-mean-LogLoss:", "{:.3f}".format(np.mean(metrics[:, 1])), "valid-mean-LogLoss:", "{:.3f}".format(np.mean(metrics[:, 2])), ) print( "train-std-LogLoss:", "{:.3f}".format(np.std(metrics[:, 1])), "valid-std-LogLoss:", "{:.3f}".format(np.std(metrics[:, 2])), ) print( "LogLoss:", "{:.3f}".format(np.mean(metrics[:, 2]) - np.std(metrics[:, 2])), "-", "{:.3f}".format(np.mean(metrics[:, 2]) + np.std(metrics[:, 2])), ) display(metrics) imp = imp.groupby("features")["importance"].agg(["mean", "std"]) imp.columns = ["importance", "importance_std"] imp["importance_cov"] = imp["importance_std"] / imp["importance"] imp = imp.reset_index(drop=False) display(imp.sort_values("importance", ascending=False, ignore_index=True)) # set x/id dataset for test # 予測用のx/idデータセットの準備 x_test = df_test.drop(columns=["Id"]) x_test["EJ"] = x_test["EJ"].astype("category") id_test = df_test[["Id"]] print(x_test.shape, id_test.shape) x_test # 
prediction of probability with test-data # テストデータによる確率の予測 y_test_pred = model.predict_proba(x_test) y_test_pred # submission # 提出用データの整形・CSV出力 sample_sub = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) df_submit = pd.DataFrame(columns=sample_sub.columns) df_submit["Id"] = id_test["Id"] df_submit[["class_0", "class_1"]] = y_test_pred df_submit.to_csv("submission.csv", index=None) print("completed.") df_submit
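A hedged refinement idea, not part of the baseline above: the submission uses only the model fitted in the last fold. If each fitted CatBoostClassifier were collected into a list inside the CV loop (that `models` list is an assumption and does not exist in the notebook), the fold probabilities could be averaged as sketched below, reusing `x_test` and `df_submit` from above.

# Sketch: average predicted probabilities across fold models instead of using only the last one.
# `models` is a hypothetical list filled by appending each fitted model inside the CV loop above.
import numpy as np

fold_probas = [m.predict_proba(x_test) for m in models]
y_test_pred = np.mean(fold_probas, axis=0)          # shape: (n_test_rows, 2)

df_submit[["class_0", "class_1"]] = y_test_pred
df_submit.to_csv("submission.csv", index=False)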
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/260/129260808.ipynb
null
null
[{"Id": 129260808, "ScriptId": 38422323, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14597111, "CreationDate": "05/12/2023 09:00:23", "VersionNumber": 1.0, "Title": "ICR_ARC_01-SimpleBaseline(EN/JP)_20230512", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 259.0, "LinesInsertedFromPrevious": 259.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
# # [Attention] はじめに # **This notebook is simple-baseline for ICR Identifying Age-Related Conditions competition.** # **You can refer and copy this notebook freely, but this will need a lot of improvement(e.g., use Greeks, feature-engineering, and more).** # **If you referred or copied this notebook, please vote for this.** # **Have fun!** # **このノートブックはシンプルなベースラインです。** # **参照や複製は自由ですが、多くの改善を必要とするでしょう(Greeksの活用や特徴量エンジニアリングなど)。** # **もし参照や複製をされた場合は、このノートブックにvoteをお願いします。** # **楽しんでいきましょう!** # import libraries # ライブラリのインポート import gc import os import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.metrics import log_loss from catboost import CatBoostClassifier, Pool import warnings warnings.simplefilter("ignore") print("imported.") # read train-data CSV # 訓練データCSVの読込 df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") df_train # **Number of records are 617, it is very light data.** # **target is "Class", and there are 56 features except for "Id".** # **It is better to refer to the Greeks as they are deeply related to the target.** # **レコード件数は617件で、かなり軽量のデータです。** # **目的変数は"Class"で、"Id"以外に56個の特徴量があります。** # **ターゲットに深く関係していると思われるので、Greeksを参照したほうがよさそうです。** # information of train-data # 訓練データの基本情報 df_train.info() # **Almost features are float type except for "Id" and "Class", but "EJ" is only categorical feature.** # **"Id"と"Class"を除いてほぼfloat型の特徴量ですが、"EJ"だけがカテゴリー変数です。** # features which include Null isnull = {} for col in df_train.columns: k = df_train[col].isnull().sum() if k > 0: isnull[col] = k isnull # statistical information of train-data # 訓練データの基本統計量 df_train.describe() # Visualization of "Class" # "Class"の可視化 print(df_train["Class"].value_counts()) sns.countplot(data=df_train, x="Class") plt.grid() # **The ratio of "0" to "1" is approximately 5 to 1.** # **0と1の割合はおよそ5対1です。** # Visualization of "EJ" # "EJ"の可視化 print(df_train["EJ"].value_counts()) sns.countplot(data=df_train, x="EJ") plt.grid() # **The ratio of "A" to "B" is approximately 1 to 2.** # **AとBの割合はおよそ1対2です。** # simple histgram of train-data # ヒストグラム表示 bins = 20 # bins = int(math.log2(len(df_train)) + 1) df_hist = df_train.drop(columns=["Id", "Class"]) fig, axs = plt.subplots(8, 7, figsize=(16, 28)) cnt = 0 for row in range(8): for col in range(7): axs[row, col].hist(df_hist.iloc[:, cnt], bins=bins) axs[row, col].set_title(df_hist.columns[cnt]) cnt += 1 plt.show() # **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.** # **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。** # correlation between features in train-data # 特徴量間の相関関係の図示 plt.figure(figsize=(14, 12)) colormap = plt.cm.RdBu sns.heatmap( df_train.corr(), linewidths=0.1, vmax=1.0, square=True, cmap=colormap, linecolor="white", annot=False, ) # **We need more deeper feature engineering. But here, we aim to complete the simple baseline first.** # **もっと深く特徴量エンジニアリングをしたほうがよいですが、ここではシンプルなベースラインの構築を目指すことにします。** # read greeks-data CSV # greeksデータCSVの読込 df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") df_greeks # information of greeks-data df_greeks.info() # detail of "Alpha" # "Alpha"の詳細 df_greeks["Alpha"].value_counts() # **"A" is class-0, and "B"/"G"/"D" are class-1. 
Numbers of "A" and "B"+"G"+"D" are the same as the numbers of 0 and 1 in "Class".** # **"A"はクラス0で"B"と"G"と"D"はクラス1です。それぞれ、"A"の数と"Class"=0、"B"+"G"+"D"の数と"Class"=1の数は一致します。** # detail of "Beta" df_greeks["Beta"].value_counts() # detail of "Gamma" df_greeks["Gamma"].value_counts() # detail of "Epsilon" df_greeks["Epsilon"].value_counts() # read test-data CSV # テストデータCSVの読込 df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") df_test # **Test data has only 5 records, simple.** # **テストデータは5件だけです。** # Information of test-data # テストデータの基本情報 df_test.info() # statistical information of test-data # テストデータの基本統計量 df_test.describe() # set x/y dataset for train # 訓練用のx/yデータセットの準備 x_train = df_train.drop(columns=["Id", "Class"]) x_train["EJ"] = x_train["EJ"].astype("category") y_train = df_train[["Class"]] print(x_train.shape, y_train.shape) x_train # fitting by CatBoost with Stratified K-Fold cross-validation (splits=3) # CatBoostによる訓練(3分割でのStratified K-Foldによるクロスバリデーション) # parameter params = { "loss_function": "Logloss", "n_estimators": 2000, # "learning_rate": 0.03, "random_state": 45, # "l2_leaf_reg": 3.0, # "bootstrap_type": "Bayesian", # "bagging_temperature": 1.0, # "subsample": 1.0, # "random_strength": 1.0, # "depth": 6, # "grow_policy": "SymmetricTree", "Deptwise", "Lossguide", # "grow_policy": "Lossguide", # "max_leaves": 31, # "od_type": "Iter", # "od_wait": 20, # "border_count": 254, } n_splits = 3 cv = list( StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=45).split( x_train, y_train ) ) metrics = [] imp = pd.DataFrame() for nfold in np.arange(n_splits): print("-" * 30, "fold:", nfold, "-" * 30) idx_tr, idx_va = cv[nfold][0], cv[nfold][1] x_tr, y_tr = x_train.loc[idx_tr, :], y_train.loc[idx_tr, :] x_va, y_va = x_train.loc[idx_va, :], y_train.loc[idx_va, :] print("x/y train-data shapes:", x_tr.shape, y_tr.shape) print("x/y valid-data shapes:", x_va.shape, y_va.shape) # fitting model = CatBoostClassifier(**params) model.fit( x_tr, y_tr, cat_features=["EJ"], eval_set=[(x_tr, y_tr), (x_va, y_va)], verbose=100, early_stopping_rounds=300, ) # prediction y_tr_pred = model.predict_proba(x_tr) y_va_pred = model.predict_proba(x_va) # set metrics(LogLoss) metric_tr = log_loss(y_tr, y_tr_pred) metric_va = log_loss(y_va, y_va_pred) metrics.append([nfold, metric_tr, metric_va]) # importance of features _imp = pd.DataFrame( { "features": x_train.columns, "importance": model.feature_importances_, "nfold": nfold, } ) imp = pd.concat([imp, _imp], axis=0, ignore_index=True) print("-" * 30, "result (LogLoss)", "-" * 30) metrics = np.array(metrics) print( "train-mean-LogLoss:", "{:.3f}".format(np.mean(metrics[:, 1])), "valid-mean-LogLoss:", "{:.3f}".format(np.mean(metrics[:, 2])), ) print( "train-std-LogLoss:", "{:.3f}".format(np.std(metrics[:, 1])), "valid-std-LogLoss:", "{:.3f}".format(np.std(metrics[:, 2])), ) print( "LogLoss:", "{:.3f}".format(np.mean(metrics[:, 2]) - np.std(metrics[:, 2])), "-", "{:.3f}".format(np.mean(metrics[:, 2]) + np.std(metrics[:, 2])), ) display(metrics) imp = imp.groupby("features")["importance"].agg(["mean", "std"]) imp.columns = ["importance", "importance_std"] imp["importance_cov"] = imp["importance_std"] / imp["importance"] imp = imp.reset_index(drop=False) display(imp.sort_values("importance", ascending=False, ignore_index=True)) # set x/id dataset for test # 予測用のx/idデータセットの準備 x_test = df_test.drop(columns=["Id"]) x_test["EJ"] = x_test["EJ"].astype("category") id_test = df_test[["Id"]] print(x_test.shape, id_test.shape) x_test # 
prediction of probability with test-data # テストデータによる確率の予測 y_test_pred = model.predict_proba(x_test) y_test_pred # submission # 提出用データの整形・CSV出力 sample_sub = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) df_submit = pd.DataFrame(columns=sample_sub.columns) df_submit["Id"] = id_test["Id"] df_submit[["class_0", "class_1"]] = y_test_pred df_submit.to_csv("submission.csv", index=None) print("completed.") df_submit
false
0
3,071
2
3,071
3,071
129383110
<jupyter_start><jupyter_text>Breast Cancer Wisconsin (Diagnostic) Data Set Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. n the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34]. This database is also available through the UW CS ftp server: ftp ftp.cs.wisc.edu cd math-prog/cpo-dataset/machine-learn/WDBC/ Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29 Attribute Information: 1) ID number 2) Diagnosis (M = malignant, B = benign) 3-32) Ten real-valued features are computed for each cell nucleus: a) radius (mean of distances from center to points on the perimeter) b) texture (standard deviation of gray-scale values) c) perimeter d) area e) smoothness (local variation in radius lengths) f) compactness (perimeter^2 / area - 1.0) g) concavity (severity of concave portions of the contour) h) concave points (number of concave portions of the contour) i) symmetry j) fractal dimension ("coastline approximation" - 1) The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius. All feature values are recoded with four significant digits. Missing attribute values: none Class distribution: 357 benign, 212 malignant Kaggle dataset identifier: breast-cancer-wisconsin-data <jupyter_script># # import libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.metrics import confusion_matrix # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Data Preprocessing df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv") df.head() df.shape df.info() df.drop(columns="Unnamed: 32", inplace=True) df["diagnosis"].unique() df = df.replace({"M": 1, "B": 0}) df.describe().T plt.figure(figsize=(20, 12)) sns.heatmap(df.drop(columns="id").corr(), annot=True) plt.hist(df["fractal_dimension_mean"]) plt.hist(df["texture_se"]) plt.hist(df["symmetry_se"]) # # Splitting Data X = df.drop(columns=["id", "diagnosis"]) y = df["diagnosis"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Logistic Regression lr_pipeline = Pipeline( [("scaler", StandardScaler()), ("logistic_regression", LogisticRegression())] ) lr_pipeline.fit(X_train, y_train) lr_pipeline.score(X_test, y_test) predicted = lr_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # # Decision Trees tree_pipeline = Pipeline( [("scaler", StandardScaler()), ("logistic_regression", DecisionTreeClassifier())] ) tree_pipeline.fit(X_train, y_train) tree_pipeline.score(X_test, y_test) predicted = tree_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # # Random Forest rf_pipeline = Pipeline( [("scaler", StandardScaler()), ("random_forest", RandomForestClassifier())] ) rf_pipeline.fit(X_train, y_train) rf_pipeline.score(X_test, y_test) predicted = rf_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1)
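# --- Hedged aside (not from the original notebook) ---
# The precision/recall/F1 numbers above are computed by hand with numpy. They
# can be cross-checked against sklearn.metrics (the notebook already imports
# from sklearn.metrics); the labels below are made up purely to show that the
# manual formulas and the library functions agree.
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score

truth_demo = np.array([1, 0, 1, 1, 0, 1, 0, 0])
pred_demo = np.array([1, 0, 1, 0, 0, 1, 1, 1])

# manual version, mirroring the notebook's formulas
p_manual = np.sum(truth_demo & pred_demo) / np.sum(pred_demo)
r_manual = np.sum(truth_demo & pred_demo) / np.sum(truth_demo)
f1_manual = 2 * p_manual * r_manual / (p_manual + r_manual)

print(p_manual, precision_score(truth_demo, pred_demo))  # both 0.6
print(r_manual, recall_score(truth_demo, pred_demo))     # both 0.75
print(f1_manual, f1_score(truth_demo, pred_demo))        # both ~0.667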
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383110.ipynb
breast-cancer-wisconsin-data
null
[{"Id": 129383110, "ScriptId": 38468518, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10444652, "CreationDate": "05/13/2023 10:17:16", "VersionNumber": 1.0, "Title": "Breast Cancer Prediction", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185380006, "KernelVersionId": 129383110, "SourceDatasetVersionId": 408}]
[{"Id": 408, "DatasetId": 180, "DatasourceVersionId": 408, "CreatorUserId": 711301, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "09/25/2016 10:49:04", "VersionNumber": 2.0, "Title": "Breast Cancer Wisconsin (Diagnostic) Data Set", "Slug": "breast-cancer-wisconsin-data", "Subtitle": "Predict whether the cancer is benign or malignant", "Description": "Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. \nn the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: \"Robust Linear Programming Discrimination of Two Linearly Inseparable Sets\", Optimization Methods and Software 1, 1992, 23-34]. \n\nThis database is also available through the UW CS ftp server: \nftp ftp.cs.wisc.edu \ncd math-prog/cpo-dataset/machine-learn/WDBC/\n\nAlso can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29\n\nAttribute Information:\n\n1) ID number \n2) Diagnosis (M = malignant, B = benign) \n3-32) \n\nTen real-valued features are computed for each cell nucleus: \n\na) radius (mean of distances from center to points on the perimeter) \nb) texture (standard deviation of gray-scale values) \nc) perimeter \nd) area \ne) smoothness (local variation in radius lengths) \nf) compactness (perimeter^2 / area - 1.0) \ng) concavity (severity of concave portions of the contour) \nh) concave points (number of concave portions of the contour) \ni) symmetry \nj) fractal dimension (\"coastline approximation\" - 1)\n\nThe mean, standard error and \"worst\" or largest (mean of the three\nlargest values) of these features were computed for each image,\nresulting in 30 features. For instance, field 3 is Mean Radius, field\n13 is Radius SE, field 23 is Worst Radius.\n\nAll feature values are recoded with four significant digits.\n\nMissing attribute values: none\n\nClass distribution: 357 benign, 212 malignant", "VersionNotes": "This updated dataset has column names added", "TotalCompressedBytes": 125204.0, "TotalUncompressedBytes": 125204.0}]
[{"Id": 180, "CreatorUserId": 711301, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 408.0, "CurrentDatasourceVersionId": 408.0, "ForumId": 1547, "Type": 2, "CreationDate": "09/19/2016 20:27:05", "LastActivityDate": "02/06/2018", "TotalViews": 1744898, "TotalDownloads": 301790, "TotalVotes": 3191, "TotalKernels": 2628}]
null
# # import libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.metrics import confusion_matrix # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Data Preprocessing df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv") df.head() df.shape df.info() df.drop(columns="Unnamed: 32", inplace=True) df["diagnosis"].unique() df = df.replace({"M": 1, "B": 0}) df.describe().T plt.figure(figsize=(20, 12)) sns.heatmap(df.drop(columns="id").corr(), annot=True) plt.hist(df["fractal_dimension_mean"]) plt.hist(df["texture_se"]) plt.hist(df["symmetry_se"]) # # Splitting Data X = df.drop(columns=["id", "diagnosis"]) y = df["diagnosis"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Logistic Regression lr_pipeline = Pipeline( [("scaler", StandardScaler()), ("logistic_regression", LogisticRegression())] ) lr_pipeline.fit(X_train, y_train) lr_pipeline.score(X_test, y_test) predicted = lr_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # # Decision Trees tree_pipeline = Pipeline( [("scaler", StandardScaler()), ("logistic_regression", DecisionTreeClassifier())] ) tree_pipeline.fit(X_train, y_train) tree_pipeline.score(X_test, y_test) predicted = tree_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # # Random Forest rf_pipeline = Pipeline( [("scaler", StandardScaler()), ("random_forest", RandomForestClassifier())] ) rf_pipeline.fit(X_train, y_train) rf_pipeline.score(X_test, y_test) predicted = rf_pipeline.predict(X_test) truth = y_test cm = confusion_matrix(truth, predicted) sns.heatmap(cm, annot=True) plt.xlabel("Predicted") plt.ylabel("Truth") precision = np.sum(truth & predicted) / np.sum(predicted) recall = np.sum(truth & predicted) / np.sum(truth) f1 = 2 * precision * recall / (precision + recall) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1)
false
0
1,099
2
1,624
1,099
129383655
<jupyter_start><jupyter_text>Starbucks Nutrition Facts ``` Nutrition facts for several Starbucks food items ``` | Column | Description | | ------- | ------------------------------------------------------------ | | item | The name of the food item. | | calories| The amount of calories in the food item. | | fat | The quantity of fat in grams present in the food item. | | carb | The amount of carbohydrates in grams found in the food item. | | fiber | The quantity of dietary fiber in grams in the food item. | | protein | The amount of protein in grams contained in the food item. | | type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). | Kaggle dataset identifier: starbucks-nutrition <jupyter_script># # StarBucks Menu Nutrition Facts Exploratory Data Analysis # ## Starbucks Intro # Starbucks, the renowned coffeehouse chain founded in 1971, has become a global icon with over 33,000 stores in 80 countries. With a staggering 4 billion cups of coffee sold in 2020, equivalent to 11 million cups per day, Starbucks has established itself as a leader in the coffee industry. # ### Dataset Details # This Dataset contains information about the nutrition facts of items in the Menu such fat, carb, protein, fiber and calories per serving. # The items are also categorized by type as well. # ## Objectives of Data Analysis # 1. FInd the items with maximum and minimum carb, fat, protein, fiber and calories. # 2. Relationship of Calories with Carbs, Fat, Protein, Fiber and Calories # 3. Distribution of Nutritional contents in the Menu # 4. Analysing the nutritional content by type of Items in the Menu. # ### Importing Libraries # importing libararies import numpy as np import plotly.express as px import pandas as pd import plotly.graph_objects as go import plotly.subplots as sp # ### Importing Dataset # importing dataset data_df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv") data_df.head() data_df.shape # no of rows and columns data_df.info() # any null values # dropping unwanted column data_df = data_df.drop("Unnamed: 0", axis=1) # descriptive stats data_df.describe() print(f"Duplicated values: {data_df.duplicated().sum()}") # checking duplicated values data_df.head() # ### Analysing data by type of items in the Menu type_df = data_df.groupby("type").agg( { "item": pd.Series.count, "calories": pd.Series.mean, "fat": pd.Series.mean, "carb": pd.Series.mean, "fiber": pd.Series.mean, "protein": pd.Series.mean, } ) type_df pie = px.pie( type_df, labels=type_df.index, values=type_df.item, title="Percentage by type of items in Starbucks Menu", names=type_df.index, ) pie.update_traces( textposition="outside", textfont_size=15, textinfo="percent + label", showlegend=False, ) pie.show() # We can clearly see that the majority ot the items in the Menu belongs to bakery type and it also makes sense because bakery items are mostly consumed with coffee. 
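# --- Hedged aside (not from the original notebook) ---
# The pie-chart reading above ("the majority of items belong to the bakery
# type") can also be checked numerically. `type_share` is my variable name and
# this assumes `data_df` is the cleaned frame loaded a few cells earlier.
type_share = data_df["type"].value_counts(normalize=True).round(3)
print(type_share)  # the bakery row should carry the largest share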
fig = go.Figure() fig.add_bar(x=type_df.index, y=type_df.fat, name="Fat Content") fig.add_bar(x=type_df.index, y=type_df.protein, name="Protein Content") fig.add_bar(x=type_df.index, y=type_df.carb, name="Carb Content") fig.add_bar(x=type_df.index, y=type_df.fiber, name="Fiber Content") fig.update_layout(barmode="group") fig.update_layout( title="Average Calories, Fat, Protein, Carb and fiber content of items in Menu by Type", xaxis_title="Type", yaxis_title="Quantity", ) fig.show() type_df = type_df.sort_values(by="calories", ascending=True) h_bar = px.bar( x=type_df.calories, y=type_df.index, orientation="h", color=type_df.calories, color_continuous_scale="reds", title="Average Calories in Menu Items by Type", ) h_bar.update_layout(xaxis_title="Type", yaxis_title="Calories") h_bar.show() # ### Distribution of Nutritional Content in the Menu import plotly.subplots as sp import plotly.graph_objects as go # Create subplots fig = sp.make_subplots(rows=3, cols=2) # Add histogram traces to subplots fig.add_trace( go.Histogram(x=data_df.calories, name="Calories", nbinsx=20), row=1, col=1 ) fig.add_trace( go.Histogram(x=data_df.fat, name="Fat Content", nbinsx=10), row=1, col=2, ) fig.add_trace( go.Histogram(x=data_df.protein, name="Protein Content", nbinsx=10), row=2, col=1, ) fig.add_trace( go.Histogram(x=data_df.carb, name="Carbs Content"), row=2, col=2, ) fig.add_trace( go.Histogram(x=data_df.fiber, name="Fiber Content"), row=3, col=1, ) # Customize the layout fig.update_layout( title="Distribution of Calories, Fat, Carb, Protein and Fiber Content", xaxis_title="Values", yaxis_title="Frequency", autosize=False, width=1000, height=700, ) # Display the plot fig.show() # ### Relationship of Calories with other nutritional content # Create the subplots fig = sp.make_subplots( rows=2, cols=2, subplot_titles=( "Fat vs Calories", "Protein vs Calories", "Carb vs Calories", "Fiber vs Calories", ), ) fig.add_trace( go.Scatter(x=data_df.fat, y=data_df.calories, name="fat", mode="markers"), row=1, col=1, ) fig.add_trace( go.Scatter(x=data_df.protein, y=data_df.calories, name="protein", mode="markers"), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_df.carb, y=data_df.calories, name="carbs", mode="markers", ), row=2, col=1, ) fig.add_trace( go.Scatter( x=data_df.fiber, y=data_df.calories, name="fiber", mode="markers", ), row=2, col=2, ) # Add chart title fig.update_layout( title="Relationship between carb, fat, proetin, fiber and Calories", autosize=False, width=1000, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="fat", row=1, col=1) fig.update_xaxes(title_text="protein", row=1, col=2) fig.update_xaxes(title_text="carb", row=2, col=1) fig.update_xaxes(title_text="fiber", row=2, col=2) fig.update_yaxes(title_text="calories", row=1, col=1) fig.update_yaxes(title_text="calories", row=1, col=2) fig.update_yaxes(title_text="calories", row=2, col=1) fig.update_yaxes(title_text="calories", row=2, col=2) # Display the chart fig.show() # ### Finding Items with Maximum and Minimum Nutrional components # ### Top 10 items with Minimum and Maximum Calories cal_max = data_df.sort_values(by=["calories"], ascending=True)[-10:] cal_min = data_df.sort_values(by=["calories"], ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest calories", "Top 10 Menu item with lowest calories", ), ) fig.add_trace( go.Bar( x=cal_max.item, y=cal_max.calories, name="Max Calories", ), row=1, col=1, ) fig.add_trace( 
go.Bar( x=cal_min.item, y=cal_min.calories, name="Min Calories", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Calories", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="calories", row=1, col=1) fig.update_yaxes(title_text="calories", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Proteins pro_max = data_df.sort_values(by="protein", ascending=True)[-10:] pro_min = data_df.sort_values(by="protein", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Proteins", "Top 10 Menu item with lowest Proteins", ), ) fig.add_trace( go.Bar( x=pro_max.item, y=pro_max.protein, name="Max Protein content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=pro_min.item, y=pro_min.protein, name="Min Protein content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Proteins", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Protein", row=1, col=1) fig.update_yaxes(title_text="Protein", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Fat fat_max = data_df.sort_values(by="fat", ascending=True)[-10:] fat_min = data_df.sort_values(by="fat", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Fat", "Top 10 Menu item with lowest Fat", ), ) fig.add_trace( go.Bar( x=fat_max.item, y=fat_max.fat, name="Max Fat content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=fat_min.item, y=fat_min.fat, name="Min Fat content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Fat", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Fat", row=1, col=1) fig.update_yaxes(title_text="Fat", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Fiber fiber_max = data_df.sort_values(by="fiber", ascending=True)[-10:] fiber_min = data_df.sort_values(by="fiber", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Fiber", "Top 10 Menu item with lowest Fiber", ), ) fig.add_trace( go.Bar( x=fiber_max.item, y=fiber_max.fiber, name="Max Fiber content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=fiber_min.item, y=fiber_min.fiber, name="Min Fiber content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Fiber", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Fiber", row=1, col=1) fig.update_yaxes(title_text="Fiber", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Carbs carb_max = data_df.sort_values(by="carb", ascending=True)[-10:] carb_min = data_df.sort_values(by="carb", 
ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Carbs", "Top 10 Menu item with lowest Carbs", ), ) fig.add_trace( go.Bar( x=carb_max.item, y=carb_max.carb, name="Max Carbs content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=carb_min.item, y=carb_min.carb, name="Min Carbs content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Carbs", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Carbs", row=1, col=1) fig.update_yaxes(title_text="Carbs", row=1, col=2) # Display the chart fig.show()
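# --- Hedged aside (not from the original notebook) ---
# The five "top/bottom 10" figures above repeat almost identical plotting code.
# A small helper that takes the nutrient column as a parameter keeps the same
# plotly calls but removes the duplication. `plot_extremes` and its arguments
# are my names; it assumes `data_df` is the cleaned Starbucks frame from above.
import pandas as pd
import plotly.graph_objects as go
import plotly.subplots as sp


def plot_extremes(df: pd.DataFrame, column: str, n: int = 10) -> go.Figure:
    """Side-by-side bars of the n menu items with the highest and lowest `column`."""
    top = df.sort_values(by=column, ascending=True)[-n:]
    bottom = df.sort_values(by=column, ascending=False)[-n:]
    fig = sp.make_subplots(
        rows=1,
        cols=2,
        subplot_titles=(
            f"Top {n} Menu items with highest {column}",
            f"Top {n} Menu items with lowest {column}",
        ),
    )
    fig.add_trace(go.Bar(x=top["item"], y=top[column], name=f"Max {column}"), row=1, col=1)
    fig.add_trace(go.Bar(x=bottom["item"], y=bottom[column], name=f"Min {column}"), row=1, col=2)
    fig.update_layout(
        title=f"Top {n} Menu Items With Highest & Lowest {column}",
        autosize=False,
        width=1200,
        height=700,
    )
    for c in (1, 2):
        fig.update_xaxes(title_text="items", row=1, col=c)
        fig.update_yaxes(title_text=column, row=1, col=c)
    return fig


# for col in ["calories", "protein", "fat", "fiber", "carb"]:
#     plot_extremes(data_df, col).show()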
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383655.ipynb
starbucks-nutrition
utkarshx27
[{"Id": 129383655, "ScriptId": 38404114, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10493034, "CreationDate": "05/13/2023 10:23:20", "VersionNumber": 1.0, "Title": "Star Bucks Menu Nutrition Facts EDA", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 353.0, "LinesInsertedFromPrevious": 353.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185380982, "KernelVersionId": 129383655, "SourceDatasetVersionId": 5651811}]
[{"Id": 5651811, "DatasetId": 3248696, "DatasourceVersionId": 5727183, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:42:59", "VersionNumber": 1.0, "Title": "Starbucks Nutrition Facts", "Slug": "starbucks-nutrition", "Subtitle": "Nutrition facts for several Starbucks food items", "Description": "```\nNutrition facts for several Starbucks food items\n```\n| Column | Description |\n| ------- | ------------------------------------------------------------ |\n| item | The name of the food item. |\n| calories| The amount of calories in the food item. |\n| fat | The quantity of fat in grams present in the food item. |\n| carb | The amount of carbohydrates in grams found in the food item. |\n| fiber | The quantity of dietary fiber in grams in the food item. |\n| protein | The amount of protein in grams contained in the food item. |\n| type | The category or type of food item (bakery, bistro box, hot breakfast, parfait, petite, salad, or sandwich). |", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3248696, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651811.0, "CurrentDatasourceVersionId": 5727183.0, "ForumId": 3314049, "Type": 2, "CreationDate": "05/10/2023 05:42:59", "LastActivityDate": "05/10/2023", "TotalViews": 12557, "TotalDownloads": 2321, "TotalVotes": 59, "TotalKernels": 17}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
# # StarBucks Menu Nutrition Facts Exploratory Data Analysis # ## Starbucks Intro # Starbucks, the renowned coffeehouse chain founded in 1971, has become a global icon with over 33,000 stores in 80 countries. With a staggering 4 billion cups of coffee sold in 2020, equivalent to 11 million cups per day, Starbucks has established itself as a leader in the coffee industry. # ### Dataset Details # This Dataset contains information about the nutrition facts of items in the Menu such fat, carb, protein, fiber and calories per serving. # The items are also categorized by type as well. # ## Objectives of Data Analysis # 1. FInd the items with maximum and minimum carb, fat, protein, fiber and calories. # 2. Relationship of Calories with Carbs, Fat, Protein, Fiber and Calories # 3. Distribution of Nutritional contents in the Menu # 4. Analysing the nutritional content by type of Items in the Menu. # ### Importing Libraries # importing libararies import numpy as np import plotly.express as px import pandas as pd import plotly.graph_objects as go import plotly.subplots as sp # ### Importing Dataset # importing dataset data_df = pd.read_csv("/kaggle/input/starbucks-nutrition/starbucks.csv") data_df.head() data_df.shape # no of rows and columns data_df.info() # any null values # dropping unwanted column data_df = data_df.drop("Unnamed: 0", axis=1) # descriptive stats data_df.describe() print(f"Duplicated values: {data_df.duplicated().sum()}") # checking duplicated values data_df.head() # ### Analysing data by type of items in the Menu type_df = data_df.groupby("type").agg( { "item": pd.Series.count, "calories": pd.Series.mean, "fat": pd.Series.mean, "carb": pd.Series.mean, "fiber": pd.Series.mean, "protein": pd.Series.mean, } ) type_df pie = px.pie( type_df, labels=type_df.index, values=type_df.item, title="Percentage by type of items in Starbucks Menu", names=type_df.index, ) pie.update_traces( textposition="outside", textfont_size=15, textinfo="percent + label", showlegend=False, ) pie.show() # We can clearly see that the majority ot the items in the Menu belongs to bakery type and it also makes sense because bakery items are mostly consumed with coffee. 
fig = go.Figure() fig.add_bar(x=type_df.index, y=type_df.fat, name="Fat Content") fig.add_bar(x=type_df.index, y=type_df.protein, name="Protein Content") fig.add_bar(x=type_df.index, y=type_df.carb, name="Carb Content") fig.add_bar(x=type_df.index, y=type_df.fiber, name="Fiber Content") fig.update_layout(barmode="group") fig.update_layout( title="Average Calories, Fat, Protein, Carb and fiber content of items in Menu by Type", xaxis_title="Type", yaxis_title="Quantity", ) fig.show() type_df = type_df.sort_values(by="calories", ascending=True) h_bar = px.bar( x=type_df.calories, y=type_df.index, orientation="h", color=type_df.calories, color_continuous_scale="reds", title="Average Calories in Menu Items by Type", ) h_bar.update_layout(xaxis_title="Type", yaxis_title="Calories") h_bar.show() # ### Distribution of Nutritional Content in the Menu import plotly.subplots as sp import plotly.graph_objects as go # Create subplots fig = sp.make_subplots(rows=3, cols=2) # Add histogram traces to subplots fig.add_trace( go.Histogram(x=data_df.calories, name="Calories", nbinsx=20), row=1, col=1 ) fig.add_trace( go.Histogram(x=data_df.fat, name="Fat Content", nbinsx=10), row=1, col=2, ) fig.add_trace( go.Histogram(x=data_df.protein, name="Protein Content", nbinsx=10), row=2, col=1, ) fig.add_trace( go.Histogram(x=data_df.carb, name="Carbs Content"), row=2, col=2, ) fig.add_trace( go.Histogram(x=data_df.fiber, name="Fiber Content"), row=3, col=1, ) # Customize the layout fig.update_layout( title="Distribution of Calories, Fat, Carb, Protein and Fiber Content", xaxis_title="Values", yaxis_title="Frequency", autosize=False, width=1000, height=700, ) # Display the plot fig.show() # ### Relationship of Calories with other nutritional content # Create the subplots fig = sp.make_subplots( rows=2, cols=2, subplot_titles=( "Fat vs Calories", "Protein vs Calories", "Carb vs Calories", "Fiber vs Calories", ), ) fig.add_trace( go.Scatter(x=data_df.fat, y=data_df.calories, name="fat", mode="markers"), row=1, col=1, ) fig.add_trace( go.Scatter(x=data_df.protein, y=data_df.calories, name="protein", mode="markers"), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_df.carb, y=data_df.calories, name="carbs", mode="markers", ), row=2, col=1, ) fig.add_trace( go.Scatter( x=data_df.fiber, y=data_df.calories, name="fiber", mode="markers", ), row=2, col=2, ) # Add chart title fig.update_layout( title="Relationship between carb, fat, proetin, fiber and Calories", autosize=False, width=1000, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="fat", row=1, col=1) fig.update_xaxes(title_text="protein", row=1, col=2) fig.update_xaxes(title_text="carb", row=2, col=1) fig.update_xaxes(title_text="fiber", row=2, col=2) fig.update_yaxes(title_text="calories", row=1, col=1) fig.update_yaxes(title_text="calories", row=1, col=2) fig.update_yaxes(title_text="calories", row=2, col=1) fig.update_yaxes(title_text="calories", row=2, col=2) # Display the chart fig.show() # ### Finding Items with Maximum and Minimum Nutrional components # ### Top 10 items with Minimum and Maximum Calories cal_max = data_df.sort_values(by=["calories"], ascending=True)[-10:] cal_min = data_df.sort_values(by=["calories"], ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest calories", "Top 10 Menu item with lowest calories", ), ) fig.add_trace( go.Bar( x=cal_max.item, y=cal_max.calories, name="Max Calories", ), row=1, col=1, ) fig.add_trace( 
go.Bar( x=cal_min.item, y=cal_min.calories, name="Min Calories", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Calories", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="calories", row=1, col=1) fig.update_yaxes(title_text="calories", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Proteins pro_max = data_df.sort_values(by="protein", ascending=True)[-10:] pro_min = data_df.sort_values(by="protein", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Proteins", "Top 10 Menu item with lowest Proteins", ), ) fig.add_trace( go.Bar( x=pro_max.item, y=pro_max.protein, name="Max Protein content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=pro_min.item, y=pro_min.protein, name="Min Protein content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Proteins", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Protein", row=1, col=1) fig.update_yaxes(title_text="Protein", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Fat fat_max = data_df.sort_values(by="fat", ascending=True)[-10:] fat_min = data_df.sort_values(by="fat", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Fat", "Top 10 Menu item with lowest Fat", ), ) fig.add_trace( go.Bar( x=fat_max.item, y=fat_max.fat, name="Max Fat content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=fat_min.item, y=fat_min.fat, name="Min Fat content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Fat", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Fat", row=1, col=1) fig.update_yaxes(title_text="Fat", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Fiber fiber_max = data_df.sort_values(by="fiber", ascending=True)[-10:] fiber_min = data_df.sort_values(by="fiber", ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Fiber", "Top 10 Menu item with lowest Fiber", ), ) fig.add_trace( go.Bar( x=fiber_max.item, y=fiber_max.fiber, name="Max Fiber content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=fiber_min.item, y=fiber_min.fiber, name="Min Fiber content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Fiber", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Fiber", row=1, col=1) fig.update_yaxes(title_text="Fiber", row=1, col=2) # Display the chart fig.show() # ### Top 10 items with Minimum and Maximum Carbs carb_max = data_df.sort_values(by="carb", ascending=True)[-10:] carb_min = data_df.sort_values(by="carb", 
ascending=False)[-10:] # Create the subplots fig = sp.make_subplots( rows=1, cols=2, subplot_titles=( "Top 10 Menu item with highest Carbs", "Top 10 Menu item with lowest Carbs", ), ) fig.add_trace( go.Bar( x=carb_max.item, y=carb_max.carb, name="Max Carbs content", ), row=1, col=1, ) fig.add_trace( go.Bar( x=carb_min.item, y=carb_min.carb, name="Min Carbs content", ), row=1, col=2, ) # Add chart title fig.update_layout( title="Top 10 Menu Items With Highest & Lowest Carbs", autosize=False, width=1200, height=700, ) # Update x and y axis titles for each subplot fig.update_xaxes(title_text="items", row=1, col=1) fig.update_xaxes(title_text="items", row=1, col=2) fig.update_yaxes(title_text="Carbs", row=1, col=1) fig.update_yaxes(title_text="Carbs", row=1, col=2) # Display the chart fig.show()
false
1
3,886
2
4,101
3,886
129383348
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.preprocessing import LabelEncoder # from sklearn.pandas import CategoricalImputer # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data_dir = "/kaggle/input/loan-status-binary-classification/" train = pd.read_csv(data_dir + "train.csv") test = pd.read_csv(data_dir + "test.csv") print(train.shape) print(train.head()) print("==============") print(test.shape) print(test.head()) train["Gender"].value_counts(dropna=False) test["Gender"].value_counts(dropna=False) train["Gender"].fillna("Third sex", inplace=True) test["Gender"].fillna("Third sex", inplace=True) train_1 = pd.DataFrame(train) dummies = pd.get_dummies(train_1["Gender"], prefix="Gender") train_1 = pd.concat([train_1, dummies], axis=1) train_1 = train_1.drop(["Gender"], axis=1) print(train_1) test_1 = pd.DataFrame(test) dummies = pd.get_dummies(test_1["Gender"], prefix="Gender") test_1 = pd.concat([test_1, dummies], axis=1) test_1 = test_1.drop(["Gender"], axis=1) print(test_1) train_1["Married"].value_counts(dropna=False) test_1["Married"].value_counts(dropna=False) train_1["Married"].fillna("Yes", inplace=True) test_1["Married"].fillna("Yes", inplace=True) train_1["Married"] = train_1["Married"].map({"Yes": 1, "No": 0}) test_1["Married"] = test_1["Married"].map({"Yes": 1, "No": 0}) print(train_1) print(test_1) train_1["Dependents"].value_counts(dropna=False) test_1["Dependents"].value_counts(dropna=False) train_1["Dependents"].fillna("0", inplace=True) test_1["Dependents"].fillna("0", inplace=True) encoder = LabelEncoder() train_1["Dependents"] = encoder.fit_transform(train_1["Dependents"]) print(train_1) encoder = LabelEncoder() test_1["Dependents"] = encoder.fit_transform(test_1["Dependents"]) print(test_1) train_1["Education"].value_counts(dropna=False) test_1["Education"].value_counts(dropna=False) train_1["Education"] = train_1["Education"].map({"Graduate": 1, "Not Graduate": 0}) test_1["Education"] = test_1["Education"].map({"Graduate": 1, "Not Graduate": 0}) train_1["Self_Employed"].value_counts(dropna=False) test_1["Self_Employed"].value_counts(dropna=False) train_1["Self_Employed"].fillna("No", inplace=True) test_1["Self_Employed"].fillna("No", inplace=True) train_1["Self_Employed"] = train_1["Self_Employed"].map({"Yes": 1, "No": 0}) test_1["Self_Employed"] = test_1["Self_Employed"].map({"Yes": 1, "No": 0}) print(train_1) train_1["Applicant_Income"].value_counts(dropna=False) train_1["Coapplicant_Income"].value_counts(dropna=False) train_1["Loan_Amount"].value_counts(dropna=False) train_1["Term"].value_counts(dropna=False) mean_term = train_1["Term"].mean() train_1["Term"].fillna(mean_term, inplace=True) test_1["Term"].fillna(mean_term, inplace=True) train["Credit_History"].value_counts(dropna=False) train_1["Credit_History"] = train["Credit_History"] test_1["Credit_History"] = test["Credit_History"] train_1["Credit_History"].fillna(1.0, inplace=True) 
test_1["Credit_History"].fillna(1.0, inplace=True) print(train_1) train_1["Area"].value_counts(dropna=False) train_end = pd.DataFrame(train_1) dummies = pd.get_dummies(train_end["Area"], prefix="Area") train_end = pd.concat([train_end, dummies], axis=1) train_end = train_end.drop(["Area"], axis=1) print(train_end) test_end = pd.DataFrame(test_1) dummies = pd.get_dummies(test_end["Area"], prefix="Area") test_end = pd.concat([test_end, dummies], axis=1) test_end = test_end.drop(["Area"], axis=1) print(test_end) from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(train_end.drop(["id", "Status"], axis=1), train_end["Status"]) preds = clf.predict(test_end.drop("id", axis=1)) sub = pd.DataFrame({"id": test_end["id"], "Status": preds}) # Write the submission dataframe to a csv file sub.to_csv("/kaggle/working/submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/383/129383348.ipynb
null
null
[{"Id": 129383348, "ScriptId": 38435961, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15037956, "CreationDate": "05/13/2023 10:19:50", "VersionNumber": 3.0, "Title": "loan-baseline2222", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 157.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 75.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.preprocessing import LabelEncoder # from sklearn.pandas import CategoricalImputer # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data_dir = "/kaggle/input/loan-status-binary-classification/" train = pd.read_csv(data_dir + "train.csv") test = pd.read_csv(data_dir + "test.csv") print(train.shape) print(train.head()) print("==============") print(test.shape) print(test.head()) train["Gender"].value_counts(dropna=False) test["Gender"].value_counts(dropna=False) train["Gender"].fillna("Third sex", inplace=True) test["Gender"].fillna("Third sex", inplace=True) train_1 = pd.DataFrame(train) dummies = pd.get_dummies(train_1["Gender"], prefix="Gender") train_1 = pd.concat([train_1, dummies], axis=1) train_1 = train_1.drop(["Gender"], axis=1) print(train_1) test_1 = pd.DataFrame(test) dummies = pd.get_dummies(test_1["Gender"], prefix="Gender") test_1 = pd.concat([test_1, dummies], axis=1) test_1 = test_1.drop(["Gender"], axis=1) print(test_1) train_1["Married"].value_counts(dropna=False) test_1["Married"].value_counts(dropna=False) train_1["Married"].fillna("Yes", inplace=True) test_1["Married"].fillna("Yes", inplace=True) train_1["Married"] = train_1["Married"].map({"Yes": 1, "No": 0}) test_1["Married"] = test_1["Married"].map({"Yes": 1, "No": 0}) print(train_1) print(test_1) train_1["Dependents"].value_counts(dropna=False) test_1["Dependents"].value_counts(dropna=False) train_1["Dependents"].fillna("0", inplace=True) test_1["Dependents"].fillna("0", inplace=True) encoder = LabelEncoder() train_1["Dependents"] = encoder.fit_transform(train_1["Dependents"]) print(train_1) encoder = LabelEncoder() test_1["Dependents"] = encoder.fit_transform(test_1["Dependents"]) print(test_1) train_1["Education"].value_counts(dropna=False) test_1["Education"].value_counts(dropna=False) train_1["Education"] = train_1["Education"].map({"Graduate": 1, "Not Graduate": 0}) test_1["Education"] = test_1["Education"].map({"Graduate": 1, "Not Graduate": 0}) train_1["Self_Employed"].value_counts(dropna=False) test_1["Self_Employed"].value_counts(dropna=False) train_1["Self_Employed"].fillna("No", inplace=True) test_1["Self_Employed"].fillna("No", inplace=True) train_1["Self_Employed"] = train_1["Self_Employed"].map({"Yes": 1, "No": 0}) test_1["Self_Employed"] = test_1["Self_Employed"].map({"Yes": 1, "No": 0}) print(train_1) train_1["Applicant_Income"].value_counts(dropna=False) train_1["Coapplicant_Income"].value_counts(dropna=False) train_1["Loan_Amount"].value_counts(dropna=False) train_1["Term"].value_counts(dropna=False) mean_term = train_1["Term"].mean() train_1["Term"].fillna(mean_term, inplace=True) test_1["Term"].fillna(mean_term, inplace=True) train["Credit_History"].value_counts(dropna=False) train_1["Credit_History"] = train["Credit_History"] test_1["Credit_History"] = test["Credit_History"] train_1["Credit_History"].fillna(1.0, inplace=True) 
test_1["Credit_History"].fillna(1.0, inplace=True) print(train_1) train_1["Area"].value_counts(dropna=False) train_end = pd.DataFrame(train_1) dummies = pd.get_dummies(train_end["Area"], prefix="Area") train_end = pd.concat([train_end, dummies], axis=1) train_end = train_end.drop(["Area"], axis=1) print(train_end) test_end = pd.DataFrame(test_1) dummies = pd.get_dummies(test_end["Area"], prefix="Area") test_end = pd.concat([test_end, dummies], axis=1) test_end = test_end.drop(["Area"], axis=1) print(test_end) from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(train_end.drop(["id", "Status"], axis=1), train_end["Status"]) preds = clf.predict(test_end.drop("id", axis=1)) sub = pd.DataFrame({"id": test_end["id"], "Status": preds}) # Write the submission dataframe to a csv file sub.to_csv("/kaggle/working/submission.csv", index=False)
false
0
1,507
0
1,507
1,507
129217578
# ![](https://storage.googleapis.com/kaggle-media/competitions/Google-Contrails/waterdroplets.png) import os, glob import pandas as pd import matplotlib.pyplot as plt DATASET_FOLDER = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming" ) path_json = os.path.join(DATASET_FOLDER, "train_metadata.json") df_train = pd.read_json(path_json) display(df_train.head()) from pprint import pprint pprint(df_train.iloc[0]["projection_wkt"]) # ## Show raw image # - **band_{08-16}.npy:** array with size of H x W x T, where T = n_times_before + n_times_after + 1, representing the number of images in the sequence. There are n_times_before and n_times_after images before and after the labeled frame respectively. In our dataset all examples have n_times_before=4 and n_times_after=3. Each band represents an infrared channel at different wavelengths and is converted to brightness temperatures based on the calibration parameters. The number in the filename corresponds to the GOES-16 ABI band number. Details of the ABI bands can be found here. # - **human_individual_masks.npy:** array with size of H x W x 1 x R. Each example is labeled by R individual human labelers. R is not the same for all samples. The labeled masks have value either 0 or 1 and correspond to the (n_times_before+1)-th image in band_{08-16}.npy. They are available only in the training set. # - **human_pixel_masks.npy:** array with size of H x W x 1 containing the binary ground truth. A pixel is regarded as contrail pixel in evaluation if it is labeled as contrail by more than half of the labelers. import numpy as np def show_sample(spl_id): folder = os.path.join(DATASET_FOLDER, "train", spl_id) fig, axarr = plt.subplots(ncols=8, nrows=9, figsize=(16, 14)) for i, bi in enumerate(range(8, 17)): img = np.load(os.path.join(folder, f"band_{bi:02}.npy")) print(img.shape, img.min(), img.max()) for j in range(8): axarr[i, j].imshow(img[:, :, j]) for n in ("human_individual_masks", "human_pixel_masks"): mask = np.load(os.path.join(folder, f"{n}.npy")) print(mask.shape, mask.min(), mask.max()) show_sample("1000823728928031783") # ## Conver to RGB # **Following example from: https://www.kaggle.com/code/inversion/visualizing-contrails** _T11_BOUNDS = (243, 303) _CLOUD_TOP_TDIFF_BOUNDS = (-4, 5) _TDIFF_BOUNDS = (-4, 2) def normalize_range(data, bounds): """Maps data to the range [0, 1].""" return (data - bounds[0]) / (bounds[1] - bounds[0]) def show_rgb_sample(spl_id, t=4): folder = os.path.join(DATASET_FOLDER, "train", spl_id) bands = [None] * 17 for bi in range(8, 17): bands[bi] = np.load(os.path.join(folder, f"band_{bi:02}.npy")) # print(bands[bi].shape, bands[bi].min(), bands[bi].max()) r = normalize_range(bands[15] - bands[14], _TDIFF_BOUNDS) g = normalize_range(bands[14] - bands[11], _CLOUD_TOP_TDIFF_BOUNDS) b = normalize_range(bands[14], _T11_BOUNDS) img = np.clip(np.stack([r, g, b], axis=2), 0, 1) mask = np.load(os.path.join(folder, "human_pixel_masks.npy")) # print(mask.shape, mask.min(), mask.max()) fig, axarr = plt.subplots(ncols=2, nrows=2, figsize=(10, 10)) axarr[0, 0].imshow(img[..., t]) axarr[0, 1].imshow(mask[..., 0], interpolation="none") axarr[1, 0].imshow(img[..., t]) axarr[1, 0].contour(mask[..., 0], linewidths=1, colors="red") show_rgb_sample("1000823728928031783")
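# --- Hedged aside (not from the original notebook) ---
# The description above says a pixel counts as contrail when more than half of
# the labelers marked it. That rule can be re-applied to human_individual_masks
# and compared with the provided human_pixel_masks (they should agree per the
# description, but that is worth verifying rather than assuming).
# `majority_vote_mask` is my name; the commented usage reuses DATASET_FOLDER and
# the sample id already used in the notebook.
import os
import numpy as np


def majority_vote_mask(sample_folder: str) -> np.ndarray:
    """Aggregate the H x W x 1 x R individual masks into an H x W binary mask."""
    individual = np.load(os.path.join(sample_folder, "human_individual_masks.npy"))
    votes = individual.mean(axis=-1)[..., 0]  # fraction of labelers per pixel
    return (votes > 0.5).astype(np.uint8)     # strictly more than half


# folder = os.path.join(DATASET_FOLDER, "train", "1000823728928031783")
# agg = majority_vote_mask(folder)
# gt = np.load(os.path.join(folder, "human_pixel_masks.npy"))[..., 0].astype(np.uint8)
# print("matches provided ground truth:", np.array_equal(agg, gt))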
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/217/129217578.ipynb
null
null
[{"Id": 129217578, "ScriptId": 38390791, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5069700, "CreationDate": "05/11/2023 23:46:51", "VersionNumber": 2.0, "Title": "\u2708\ufe0fContrails: EDA \ud83e\uddd1\u200d\u2708\ufe0f & interactive \ud83d\udd0e viewer", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 35.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 36.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
null
null
null
null
# ![](https://storage.googleapis.com/kaggle-media/competitions/Google-Contrails/waterdroplets.png) import os, glob import pandas as pd import matplotlib.pyplot as plt DATASET_FOLDER = ( "/kaggle/input/google-research-identify-contrails-reduce-global-warming" ) path_json = os.path.join(DATASET_FOLDER, "train_metadata.json") df_train = pd.read_json(path_json) display(df_train.head()) from pprint import pprint pprint(df_train.iloc[0]["projection_wkt"]) # ## Show raw image # - **band_{08-16}.npy:** array with size of H x W x T, where T = n_times_before + n_times_after + 1, representing the number of images in the sequence. There are n_times_before and n_times_after images before and after the labeled frame respectively. In our dataset all examples have n_times_before=4 and n_times_after=3. Each band represents an infrared channel at different wavelengths and is converted to brightness temperatures based on the calibration parameters. The number in the filename corresponds to the GOES-16 ABI band number. Details of the ABI bands can be found here. # - **human_individual_masks.npy:** array with size of H x W x 1 x R. Each example is labeled by R individual human labelers. R is not the same for all samples. The labeled masks have value either 0 or 1 and correspond to the (n_times_before+1)-th image in band_{08-16}.npy. They are available only in the training set. # - **human_pixel_masks.npy:** array with size of H x W x 1 containing the binary ground truth. A pixel is regarded as contrail pixel in evaluation if it is labeled as contrail by more than half of the labelers. import numpy as np def show_sample(spl_id): folder = os.path.join(DATASET_FOLDER, "train", spl_id) fig, axarr = plt.subplots(ncols=8, nrows=9, figsize=(16, 14)) for i, bi in enumerate(range(8, 17)): img = np.load(os.path.join(folder, f"band_{bi:02}.npy")) print(img.shape, img.min(), img.max()) for j in range(8): axarr[i, j].imshow(img[:, :, j]) for n in ("human_individual_masks", "human_pixel_masks"): mask = np.load(os.path.join(folder, f"{n}.npy")) print(mask.shape, mask.min(), mask.max()) show_sample("1000823728928031783") # ## Conver to RGB # **Following example from: https://www.kaggle.com/code/inversion/visualizing-contrails** _T11_BOUNDS = (243, 303) _CLOUD_TOP_TDIFF_BOUNDS = (-4, 5) _TDIFF_BOUNDS = (-4, 2) def normalize_range(data, bounds): """Maps data to the range [0, 1].""" return (data - bounds[0]) / (bounds[1] - bounds[0]) def show_rgb_sample(spl_id, t=4): folder = os.path.join(DATASET_FOLDER, "train", spl_id) bands = [None] * 17 for bi in range(8, 17): bands[bi] = np.load(os.path.join(folder, f"band_{bi:02}.npy")) # print(bands[bi].shape, bands[bi].min(), bands[bi].max()) r = normalize_range(bands[15] - bands[14], _TDIFF_BOUNDS) g = normalize_range(bands[14] - bands[11], _CLOUD_TOP_TDIFF_BOUNDS) b = normalize_range(bands[14], _T11_BOUNDS) img = np.clip(np.stack([r, g, b], axis=2), 0, 1) mask = np.load(os.path.join(folder, "human_pixel_masks.npy")) # print(mask.shape, mask.min(), mask.max()) fig, axarr = plt.subplots(ncols=2, nrows=2, figsize=(10, 10)) axarr[0, 0].imshow(img[..., t]) axarr[0, 1].imshow(mask[..., 0], interpolation="none") axarr[1, 0].imshow(img[..., t]) axarr[1, 0].contour(mask[..., 0], linewidths=1, colors="red") show_rgb_sample("1000823728928031783")
false
0
1,187
7
1,187
1,187
129650672
# # Kaggle Intro # * Please register on kaggle. Use your Levi9 email. If already registered, you can use your account, but identify yourself to the org team # * **Important:** confirm your phone number. Otherwise you will not have access to some features like Internet access from Notebook or GPU # * You have 30 free GPU hours per week per user. Quota available in your profile. # ## Notebook configuration # * Enabling internet access - on the right, switch the Internet (available only after phone confirmation) # * Enabling GPU - choose accelerator from the options # ## Installing additional libraries # ## Working with the data # * competition data # * adding own dataset - create a dataset and add it to competition (https://www.kaggle.com/datasets) import torchvision.datasets as dset import torchvision.transforms as transforms path2data = "/kaggle/input/levi9-hack9-2023/train" path2json = "/kaggle/input/levi9-hack9-2023/train.json" coco_train = dset.CocoDetection( root=path2data, annFile=path2json, transform=transforms.ToTensor() ) print("Number of samples: ", len(coco_train)) img, target = coco_train[0] print(img.size) print(target) # ## Notebook Sharing # Privately with your teammates or make it public # ## Working with the secrets from kaggle_secrets import UserSecretsClient user_secrets = UserSecretsClient() AWS_ACCESS_KEY_ID = user_secrets.get_secret("AWS_ACCESS_KEY_ID") # ## Submission # On the right, there is an output folder import pandas as pd submission = [ {"image_id": "test", "prediction": "test2"}, {"image_id": "test22", "prediction": "sample"}, ] df = pd.DataFrame.from_dict(submission) df.to_csv("submission2.csv", index=False)
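# --- Hedged aside (not from the original notebook) ---
# Indexing coco_train one sample at a time works, but for training you normally
# batch it with a DataLoader. COCO targets are variable-length lists of
# annotation dicts, so the default collate function cannot stack them;
# `coco_collate` (my name) simply keeps images and targets as plain lists.
from torch.utils.data import DataLoader


def coco_collate(batch):
    """Zip (image, target) pairs into (list of images, list of targets)."""
    images, targets = zip(*batch)
    return list(images), list(targets)


# loader = DataLoader(coco_train, batch_size=4, shuffle=True, collate_fn=coco_collate)
# images, targets = next(iter(loader))
# print(len(images), [len(t) for t in targets])  # batch size and per-image annotation counts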
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650672.ipynb
null
null
[{"Id": 129650672, "ScriptId": 38433042, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1904452, "CreationDate": "05/15/2023 13:37:45", "VersionNumber": 2.0, "Title": "Kaggle intro", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 23.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 28.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Kaggle Intro # * Please register on kaggle. Use your Levi9 email. If already registered, you can use your account, but identify yourself to the org team # * **Important:** confirm your phone number. Otherwise you will not have access to some features like Internet access from Notebook or GPU # * You have 30 free GPU hours per week per user. Quota available in your profile. # ## Notebook configuration # * Enabling internet access - on the right, switch the Internet (available only after phone confirmation) # * Enabling GPU - choose accelerator from the options # ## Installing additional libraries # ## Working with the data # * competition data # * adding own dataset - create a dataset and add it to competition (https://www.kaggle.com/datasets) import torchvision.datasets as dset import torchvision.transforms as transforms path2data = "/kaggle/input/levi9-hack9-2023/train" path2json = "/kaggle/input/levi9-hack9-2023/train.json" coco_train = dset.CocoDetection( root=path2data, annFile=path2json, transform=transforms.ToTensor() ) print("Number of samples: ", len(coco_train)) img, target = coco_train[0] print(img.size) print(target) # ## Notebook Sharing # Privately with your teammates or make it public # ## Working with the secrets from kaggle_secrets import UserSecretsClient user_secrets = UserSecretsClient() AWS_ACCESS_KEY_ID = user_secrets.get_secret("AWS_ACCESS_KEY_ID") # ## Submission # On the right, there is an output folder import pandas as pd submission = [ {"image_id": "test", "prediction": "test2"}, {"image_id": "test22", "prediction": "sample"}, ] df = pd.DataFrame.from_dict(submission) df.to_csv("submission2.csv", index=False)
false
0
477
0
477
477
129650266
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud from IPython.display import display import base64 import string import re from collections import Counter from time import time # from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stopwords from nltk.corpus import stopwords from sklearn.metrics import log_loss stopwords = stopwords.words("english") sns.set_context("notebook") print("Numpy version:", np.__version__) print("Pandas version:", pd.__version__) print("Seaborn version:", sns.__version__) # **Data Visualization** train = pd.read_csv("/kaggle/working/train.csv") train.head() train.index train = pd.read_csv( "/kaggle/working/train.csv", skiprows=1, names=["ID", "Text", "Author"] ).set_index("ID") test = pd.read_csv( "/kaggle/working/test.csv", skiprows=1, names=["ID", "Text"] ).set_index("ID") # Delete the word 'id' from the ID columns train.index = [id[2:] for id in train.index] test.index = [id[2:] for id in test.index] train.sort_index(inplace=True) test.sort_index(inplace=True) train["Text"][0] display(train.head()) display(test.head()) print("Training sample:", train["Text"][0]) print("Author of sample:", train["Author"][0]) print("Training Data Shape:", train.shape) print("Testing Data Shape:", test.shape) print("Training Dataset Info:") display(train.info()) print("Test Dataset Info:") display(test.info()) # **Plot** sns.barplot( x=["Edgar Allen Poe", "Mary Wollstonecraft Shelley", "H.P. Lovecraft"], y=train["Author"].value_counts(), ) plt.show() # **Create Spooky Wordcloud** font_64_decode = base64.decodebytes(font_64_encode) font_result = open( "spooky_font.ttf", "wb" ) # create a writable font file and write the decoding result font_result.write(font_64_decode) font_result.close() all_text = " ".join([text for text in train["Text"]]) print("Number of words in all_text:", len(all_text)) wordcloud = WordCloud( font_path="spooky_font.ttf", width=800, height=500, random_state=21, max_font_size=110, ).generate(all_text) plt.figure(figsize=(15, 12)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # **Wordcloud for Edgar Allen Poe** [train["Author"] == "EAP"] eap = train[train["Author"] == "EAP"] eap_text = " ".join(text for text in eap["Text"]) print("Number of words in eap_text:", len(eap_text)) wordcloud = WordCloud( font_path="spooky_font.ttf", width=800, height=500, random_state=21, max_font_size=110, ).generate(eap_text) plt.figure(figsize=(15, 12)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # **Wordcloud for Mary Wollstonecraft Shelley** train[train["Author"] == "MWS"] mws = train[train["Author"] == "MWS"] mws_text = " ".join(text for text in mws["Text"]) print("Number of words in eap_text:", len(mws_text)) wordcloud = WordCloud( font_path="spooky_font.ttf", width=800, height=500, random_state=21, max_font_size=110, ).generate(mws_text) plt.figure(figsize=(15, 12)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # **Wordcloud for H.P. 
Lovecraft** train[train["Author"] == "HPL"] hpl = train[train["Author"] == "HPL"] hpl_text = " ".join(text for text in hpl["Text"]) print("Number of words in eap_text:", len(hpl_text)) wordcloud = WordCloud( font_path="spooky_font.ttf", width=800, height=500, random_state=21, max_font_size=110, ).generate(hpl_text) plt.figure(figsize=(15, 12)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # **Text Preprocessing** import spacy nlp = spacy.load("en_core_web_sm") punctuations = string.punctuation def cleanup_text(docs, logging=False): texts = [] counter = 1 for doc in docs: if counter % 1000 == 0 and logging: print("Processed %d out of %d documents." % (counter, len(docs))) counter += 1 doc = nlp(doc, disable=["parser", "ner"]) tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != "-PRON-"] tokens = [ tok for tok in tokens if tok not in stopwords and tok not in punctuations ] tokens = " ".join(tokens) texts.append(tokens) return pd.Series(texts) # **Plot Word Frequency By Each Author** train[train["Author"] == "EAP"]["Text"] eap_text = [text for text in train[train["Author"] == "EAP"]["Text"]] hpl_text = [text for text in train[train["Author"] == "HPL"]["Text"]] mws_text = [text for text in train[train["Author"] == "MWS"]["Text"]] # clean up eap eap_clean = cleanup_text(eap_text) eap_clean = " ".join(eap_clean).split() eap_claen = [word for word in eap_clean if word != "\s"] # clean up hpl hpl_clean = cleanup_text(hpl_text) hpl_clean = " ".join(hpl_clean).split() # 's appears a lot in the text, so we get rid of it since it's not a word hpl_clean = [word for word in hpl_clean if word != "'s"] # clean up mws mws_clean = cleanup_text(mws_text) mws_clean = " ".join(mws_clean).split() # 's appears a lot in the text, so we get rid of it since it's not a word mws_clean = [word for word in mws_clean if word != "'s"] eap_counts = Counter(eap_clean) hpl_counts = Counter(hpl_clean) mws_counts = Counter(mws_clean) # **EAP Common Words** eap_common_words = [word[0] for word in eap_counts.most_common(25)] eap_common_counts = [word[1] for word in eap_counts.most_common(25)] plt.style.use("dark_background") plt.figure(figsize=(15, 12)) sns.barplot(x=eap_common_words, y=eap_common_counts) plt.title("Most Common Words used by Edgar Allen Poe") plt.show() # **HPL Common Words** hpl_common_words = [word[0] for word in hpl_counts.most_common(25)] hpl_common_counts = [word[1] for word in hpl_counts.most_common(25)] plt.figure(figsize=(15, 12)) sns.barplot(x=hpl_common_words, y=hpl_common_counts) plt.title("Most Common Words used by H.P. 
Lovecraft") plt.show() # **MWS Common Words** mws_common_words = [word[0] for word in mws_counts.most_common(25)] mws_common_counts = [word[1] for word in mws_counts.most_common(25)] plt.figure(figsize=(15, 12)) sns.barplot(x=mws_common_words, y=mws_common_counts) plt.title("Most Common Words used by Mary Wollstonecraft Shelley") plt.show() print(eap_counts.most_common(25)) print() print(hpl_counts.most_common(25)) print() print(mws_counts.most_common(25)) print("Original training data shape: ", train["Text"].shape) train_cleaned = cleanup_text(train["Text"], logging=True) print("Cleaned up training data shape: ", train_cleaned.shape) print("Parsing documents...") start = time() train_vec = [] for doc in nlp.pipe(train_cleaned, batch_size=500): if doc.has_vector: train_vec.append(doc.vector) else: train_vec.append(np.zeros((128,), dtype="float32")) train_vec = np.array(train_vec) end = time() print("Total time passed parsing documents: {} seconds".format(end - start)) print("Total number of documents parsed: {}".format(len(train_vec))) print("Number of words in first document: ", len(train["Text"][0])) print("Number of words in second document: ", len(train["Text"][1])) # print('Size of vector embeddings: ', train_vec.shape[1]) print("Shape of vectors embeddings matrix: ", train_vec.shape) # **Word2vec** all_text = np.concatenate((train["Text"], test["Text"]), axis=0) all_text = pd.DataFrame(all_text, columns=["Text"]) print("Number of total text documents:", len(all_text)) def cleanup_text_word2vec(docs, logging=False): sentences = [] counter = 1 for doc in docs: if counter % 1000 == 0 and logging: print("Processed %d out of %d documents" % (counter, len(docs))) doc = nlp(doc, disable=["tagger"]) doc = " ".join([tok.lemma_.lower() for tok in doc]) # Split into sentences based on punctuation doc = re.split("[\.?!;] ", doc) # Remove commas, periods, and other punctuation (mostly commas) doc = [re.sub("[\.,;:!?]", "", sent) for sent in doc] # Split into words doc = [sent.split() for sent in doc] sentences += doc counter += 1 return sentences train_cleaned_word2vec = cleanup_text_word2vec(all_text["Text"], logging=True) print( "Cleaned up training data size (i.e. 
number of sentences): ", len(train_cleaned_word2vec), ) from gensim.models.word2vec import Word2Vec text_dim = 300 print("Training Word2Vec model...") wordvec_model = Word2Vec( train_cleaned_word2vec, vector_size=text_dim, window=5, min_count=3, workers=4, sg=1 ) print("Word2Vec model created.") print( "%d unique words represented by %d dimensional vectors" % (len(wordvec_model.wv.key_to_index), text_dim) ) print(wordvec_model.wv.most_similar(positive=["woman", "king"], negative=["man"])) print( wordvec_model.wv.most_similar_cosmul(positive=["woman", "king"], negative=["man"]) ) print(wordvec_model.wv.doesnt_match("breakfast cereal dinner lunch".split())) print(wordvec_model.wv.similarity("woman", "man")) print(wordvec_model.wv.similarity("widow", "mother")) def create_average_vec(doc): average = np.zeros((text_dim,), dtype="float32") num_words = 0.0 for word in doc.split(): if word in wordvec_model.wv.key_to_index: average = np.add(average, wordvec_model.wv[word]) num_words += 1.0 if num_words != 0.0: average = np.divide(average, num_words) return average count = 0 for i in range(len(train_cleaned)): if train_cleaned[i] == "": print("index:", i) count += 1 print(count) # **Word Vectors** train_cleaned_vec = np.zeros((train.shape[0], text_dim), dtype="float32") # 19579 x 300 for i in range(len(train_cleaned)): train_cleaned_vec[i] = create_average_vec(train_cleaned[i]) print("Train word vector shape:", train_cleaned_vec.shape) # **One-hot Encode Labels** from sklearn.preprocessing import label_binarize # transform labels into the hot encoded y_train_ohe = label_binarize(train["Author"], classes=["EAP", "HPL", "MWS"]) print("y_train_ohe shape: {}".format(y_train_ohe.shape)) print("y_train_ohe samples:") print(y_train_ohe[:5]) # **Split into Train/Test Datasets** from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( train_cleaned_vec, y_train_ohe, test_size=0.2, random_state=21 ) print("X_train size: {}".format(X_train.shape)) print("X_test size: {}".format(X_test.shape)) print("y_train size: {}".format(y_train.shape)) print("y_test size: {}".format(y_test.shape)) # **Keras** from keras.models import Sequential, Model from keras.layers import Dense, Dropout, Input, LSTM, Embedding, Bidirectional, Flatten from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D from keras.optimizers import SGD def build_model(architecture="mlp"): model = Sequential() if architecture == "mlp": model.add( Dense(512, activation="relu", kernel_initializer="he_normal", input_dim=300) ) model.add(Dropout(0.2)) model.add(Dense(512, activation="relu", kernel_initializer="he_normal")) model.add(Dropout(0.2)) model.add(Dense(512, activation="relu", kernel_initializer="he_normal")) model.add(Dropout(0.2)) model.add(Dense(512, activation="relu", kernel_initializer="he_normal")) model.add(Dropout(0.2)) model.add(Dense(3, activation="softmax")) elif architecture == "cnn": inputs = Input(shape=(300, 1)) x = Conv1D(64, 3, strides=1, padding="same", activation="relu")(inputs) x = MaxPooling1D(pool_size=2)(x) outputs = Dense(3, activation="softmax")(x) modeel = Model(inputs=inputs, outputs=outputs, name="CNN") elif architecture == "lstm": inputs = Input(shape=(300, 1)) x = Bidirectional(LSTM(64, return_sequences=True), merge_mode="concat")(inputs) x = Dropout(0.2)(x) x = Flatten(x) outputs = Dense(3, activation="softmax")(x) model = Model(inputs=inputs, outputs=outputs, name="LSTM") else: print("Error: Model type not found.") return model model = build_model("mlp") 
if model.name == "CNN" or model.name == "LSTM": X_train = np.expand_dims(X_train, axis=2) X_test = np.expand_dims(X_test, axis=2) print("Text train shape: ", X_test.shape) print("Text test shape: ", X_test.shape) model.summary() # **SGD** sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=["acc"]) epochs = 30 estimator = model.fit( X_train, y_train, validation_split=0.2, epochs=epochs, batch_size=128, verbose=1 ) print( "Training accuracy: %.2f%% / Validation accuracy: %.2f%%" % (100 * estimator.history["acc"][-1], 100 * estimator.history["val_acc"][-1]) ) sns.reset_orig() plt.plot(estimator.history["acc"]) plt.plot(estimator.history["val_acc"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train", "valid"], loc="upper left") plt.show() # Plot model loss over epochs plt.plot(estimator.history["loss"]) plt.plot(estimator.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "valid"], loc="upper left") plt.show() # **Prediction and Submission** predicted_prob = model.predict(X_test) print(predicted_prob.shape) with open("submission.csv", "w") as file_obj: file_obj.write("ID,EAP,HPL,MWS\n") for pred in range(len(predicted_prob)): file_obj.write( str(pred + 1) + "," + ",".join("{:.2f}".format(s) for s in predicted_prob[pred].tolist()) + "\n" ) loss_sk = log_loss(y_test, predicted_prob) print("Log loss is: {}".format(loss_sk))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650266.ipynb
null
null
[{"Id": 129650266, "ScriptId": 38524167, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9917475, "CreationDate": "05/15/2023 13:34:55", "VersionNumber": 3.0, "Title": "Spooky Author Identification-WordCloud-Word2vec", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 448.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 448.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,646
0
4,646
4,646
129650674
<jupyter_start><jupyter_text>Skin Cancer Dataset
A training set for academic machine learning can be created using the dataset, which comprises 10015 dermatoscopic images. All significant diagnostic categories for pigmented lesions are represented in the cases in a representative manner:
- Actinic keratoses and intraepithelial carcinoma / Bowen's disease (`akiec`),
- basal cell carcinoma (`bcc`),
- benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, `bkl`),
- dermatofibroma (`df`),
- melanoma (`mel`),
- melanocytic nevi (`nv`) and
- vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, `vasc`).

Histopathology (histo) is used to confirm more than 50% of lesions; in the remaining cases, follow-up exams, expert consensus, or in-vivo confocal microscopy confirmation are used as the gold standard (confocal).

Dataset Collected from: https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T
Kaggle dataset identifier: skin-cancer-dataset
<jupyter_script>import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
    Input,
    Conv2D,
    MaxPooling2D,
    BatchNormalization,
    Activation,
    Add,
    GlobalAveragePooling2D,
    Dense,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import to_categorical

# Load the metadata file
metadata = pd.read_csv("/kaggle/input/skin-cancer-dataset/HAM10000_metadata.csv")
# Preprocess the data
image_size = 32
X = []
y = []
for index, row in metadata.iterrows():
    if index % 1000 == 0:
        print(f"Processing image {index}")
    img_id = row["image_id"] + ".jpg"
    img_path1 = os.path.join(
        "/kaggle/input/skin-cancer-dataset/Skin Cancer/Skin Cancer", img_id
    )
    if os.path.exists(img_path1):
        img_path = img_path1
    else:
        print(f"Image file does not exist: {img_id}")
        continue
    img = cv2.imread(img_path)
    if img is None:
        print(f"Error loading image: {img_path}")
        continue
    img = cv2.resize(img, (image_size, image_size))
    X.append(img)
    y.append(row["dx"])
X = np.array(X)
X = X / 255.0
y = np.array(y)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
# Convert labels from string to numerical categories
label_dict = {"akiec": 0, "bcc": 1, "bkl": 2, "df": 3, "mel": 4, "nv": 5, "vasc": 6}
y_train = [label_dict[label] for label in y_train]
y_test = [label_dict[label] for label in y_test]
# Convert the labels to one-hot encoded vectors
y_train = to_categorical(y_train, num_classes=7)
y_test = to_categorical(y_test, num_classes=7)
datagen = ImageDataGenerator(
    rotation_range=90, horizontal_flip=True, vertical_flip=True
)
# Generate augmented images from the training data
augmented_data = datagen.flow(X_train, y_train, batch_size=32)


def resnet50(input_shape, num_classes):
    def residual_block(x, filters, downsample=False):
        strides = (2, 2) if downsample else (1, 1)
        shortcut = x
        x = Conv2D(filters, (1, 1), strides=strides, padding="valid")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters, (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(filters * 4, (1, 1))(x)
        x = BatchNormalization()(x)
        # Project the shortcut whenever the spatial size or channel count changes,
        # otherwise the Add() below fails with a shape mismatch.
        if downsample or shortcut.shape[-1] != filters * 4:
            shortcut = Conv2D(filters * 4, (1, 1), strides=strides, padding="valid")(
                shortcut
            )
            shortcut = BatchNormalization()(shortcut)
        x = Add()([x, shortcut])
        x = Activation("relu")(x)
        return x
    inputs = Input(shape=input_shape)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    x = residual_block(x, 64)
    x = residual_block(x, 64)
    x = residual_block(x, 64)
    x = residual_block(x, 128, downsample=True)
    x = residual_block(x, 128)
    x = residual_block(x, 128)
    x = residual_block(x, 128)
    x = residual_block(x, 256, downsample=True)
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = residual_block(x, 512, downsample=True)
    x = residual_block(x, 512)
    x = residual_block(x, 512)
    x = GlobalAveragePooling2D()(x)
    x = Dense(num_classes, activation="softmax")(x)
    model = Model(inputs=inputs, outputs=x)
    return model


# Define the input shape and number of classes
# (must match the size the images were resized to above, i.e. 32x32x3)
input_shape = (image_size, image_size, 3)
num_classes = 7
# Create the ResNet50 model
model = resnet50(input_shape, num_classes)
# Compile the model
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model using the augmented data
history = model.fit(
    augmented_data,
    steps_per_epoch=len(X_train) // 32,
    validation_data=(X_test, y_test),
    epochs=21,
)
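`ModelCheckpoint` is imported in the script above but never used. A minimal sketch, assuming the `model`, `augmented_data`, `X_train`, `X_test`, and `y_test` objects defined above, of how the final `fit` call could instead keep the best weights and report held-out performance; the checkpoint file name is an arbitrary choice, not something from the original notebook:

```python
# Hedged sketch: an alternative to the plain fit() call above that saves the
# best weights (by validation accuracy) and evaluates them afterwards.
checkpoint = ModelCheckpoint(
    "best_resnet_weights.h5",  # arbitrary file name
    monitor="val_accuracy",
    save_best_only=True,
    save_weights_only=True,
    verbose=1,
)

history = model.fit(
    augmented_data,
    steps_per_epoch=len(X_train) // 32,
    validation_data=(X_test, y_test),
    epochs=21,
    callbacks=[checkpoint],
)

# Reload the best checkpoint and report held-out performance.
model.load_weights("best_resnet_weights.h5")
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Held-out accuracy: {test_acc:.3f}, loss: {test_loss:.3f}")
```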
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/650/129650674.ipynb
skin-cancer-dataset
farjanakabirsamanta
[{"Id": 129650674, "ScriptId": 38553792, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10152574, "CreationDate": "05/15/2023 13:37:45", "VersionNumber": 1.0, "Title": "notebook295418df5b", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 133.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185939950, "KernelVersionId": 129650674, "SourceDatasetVersionId": 4431730}]
[{"Id": 4431730, "DatasetId": 2595427, "DatasourceVersionId": 4491177, "CreatorUserId": 11779392, "LicenseName": "Other (specified in description)", "CreationDate": "11/01/2022 09:04:58", "VersionNumber": 1.0, "Title": "Skin Cancer Dataset", "Slug": "skin-cancer-dataset", "Subtitle": "7 types of Skin Cancer", "Description": "A training set for academic machine learning can be created using the dataset, which comprises of 10015 dermatoscopic images. All significant diagnostic categories for pigmented lesions are represented in the cases in a representative manner: \n- Actinic keratoses and intraepithelial carcinoma / Bowen's disease (`akiec`), \n- basal cell carcinoma (`bcc`), \n- benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, `bkl`), \n- dermatofibroma (`df`), \n- melanoma (`mel`), \n- melanocytic nevi (`nv`) and \n- vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, `vasc`).\n\nHistopathology (histo) is used to confirm more than 50% of lesions; in the remaining cases, follow-up exams, expert consensus, or in-vivo confocal microscopy confirmation are used as the gold standard (confocal). \n\nDataset Collected from:\nhttps://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2595427, "CreatorUserId": 11779392, "OwnerUserId": 11779392.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4431730.0, "CurrentDatasourceVersionId": 4491177.0, "ForumId": 2625653, "Type": 2, "CreationDate": "11/01/2022 09:04:58", "LastActivityDate": "11/01/2022", "TotalViews": 27735, "TotalDownloads": 2906, "TotalVotes": 71, "TotalKernels": 11}]
[{"Id": 11779392, "UserName": "farjanakabirsamanta", "DisplayName": "Farjana Kabir", "RegisterDate": "10/01/2022", "PerformanceTier": 2}]
false
1
1,407
0
1,744
1,407
129627997
import pandas as pd import numpy as np from selenium import webdriver import re import plotly.graph_objs as go import plotly.express as px pd.options.mode.chained_assignment = None data_one = pd.read_csv("H-1B_2014.csv") data_two = pd.read_csv("H-1B_2015.csv") data_three = pd.read_csv("H-1B_2016.csv") columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] # renaming the columns data_two = data_two.rename( columns={ "CASE_NUMBER": "LCA_CASE_NUMBER", "CASE_STATUS": "STATUS", "CASE_SUBMITTED": "LCA_CASE_SUBMIT", "DECISION_DATE": "DECISION_DATE", "EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE", "EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE", "EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME", "EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE", "EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY", "SOC_CODE": "LCA_CASE_SOC_CODE", "SOC_NAME": "LCA_CASE_SOC_NAME", "JOB_TITLE": "LCA_CASE_JOB_TITLE", "WAGE_RATE_OF_PAY": "LCA_CASE_WAGE_RATE_FROM", "WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POSITION": "FULL_TIME_POS", "NAIC_CODE": "LCA_CASE_NAICS_CODE", } ) columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] # renaming the columns data_three = data_three.rename( columns={ "CASE_NUMBER": "LCA_CASE_NUMBER", "CASE_STATUS": "STATUS", "CASE_SUBMITTED": "LCA_CASE_SUBMIT", "DECISION_DATE": "DECISION_DATE", "EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE", "EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE", "EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME", "EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE", "EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY", "SOC_CODE": "LCA_CASE_SOC_CODE", "SOC_NAME": "LCA_CASE_SOC_NAME", "JOB_TITLE": "LCA_CASE_JOB_TITLE", "WAGE_RATE_OF_PAY_FROM": "LCA_CASE_WAGE_RATE_FROM", "WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POSITION": "FULL_TIME_POS", "NAIC_CODE": "LCA_CASE_NAICS_CODE", } ) # concat the three dataframes final_df = pd.concat( [data_one[columns], data_two[columns], data_three[columns]] ).reset_index(drop=True) final_df.head() final_df.columns final_df.info() (final_df.isnull().sum()) final_df.head() # ## Feature engineering # ### creating employment period column # convert date columns to datetime format and assign nan to invalid data final_df["LCA_CASE_EMPLOYMENT_START_DATE"] = pd.to_datetime( final_df["LCA_CASE_EMPLOYMENT_START_DATE"], errors="coerce" ) final_df["LCA_CASE_EMPLOYMENT_END_DATE"] = pd.to_datetime( final_df["LCA_CASE_EMPLOYMENT_END_DATE"], errors="coerce" ) # subtract the 
LCA_CASE_EMPLOYMENT_END_DATE from LCA_CASE_EMPLOYMENT_START_DATEto find employment period LCA_CASE_EMPLOYMENT_PERIOD = ( final_df["LCA_CASE_EMPLOYMENT_END_DATE"] - final_df["LCA_CASE_EMPLOYMENT_START_DATE"] ) # create a new column with LCA_CASE_EMPLOYMENT_PERIOD value final_df.insert(7, "LCA_CASE_EMPLOYMENT_PERIOD", LCA_CASE_EMPLOYMENT_PERIOD) final_df.head() # converting LCA_CASE_EMPLOYMENT_PERIOD into days format final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].dt.days final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # delete the outlier value, i.e employment days less than 0. final_df = final_df[final_df["LCA_CASE_EMPLOYMENT_PERIOD"] > 0] final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # - The value of 30.44 is derived by calculating the average number of days in a month over a period of four years. # - The average number of days in a year is 365.24 (due to leap years), so dividing by 12 gives an average of 30.44 days per month. This is a commonly used approximation in calculations involving months and days. # - Using 30.44 as the number of days in a month provides a more accurate estimate when converting between months and days, rather than assuming that each month has exactly 30 or 31 days. # # the employment period is converted into months final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = round( final_df["LCA_CASE_EMPLOYMENT_PERIOD"] / 30.44 ) # filled the missing value with 0 and converted the column type to int final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = ( final_df["LCA_CASE_EMPLOYMENT_PERIOD"].fillna(0).astype(int) ) # ### creating visa decision duration column # convert date columns to datetime format and assign nan to invalid data final_df["LCA_CASE_SUBMIT"] = pd.to_datetime( final_df["LCA_CASE_SUBMIT"], errors="coerce" ) final_df["DECISION_DATE"] = pd.to_datetime(final_df["DECISION_DATE"], errors="coerce") # subtract the LCA_CASE_SUBMIT from DECISION_DATE to find visa decision period LCA_CASE_DECISION_PERIOD = final_df["DECISION_DATE"] - final_df["LCA_CASE_SUBMIT"] # create a new column with LCA_CASE_DECISION_PERIOD value final_df.insert(4, "LCA_CASE_DECISION_PERIOD", LCA_CASE_DECISION_PERIOD) final_df["LCA_CASE_DECISION_PERIOD"] = final_df["LCA_CASE_DECISION_PERIOD"].dt.days final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # remove special characters from LCA_CASE_EMPLOYER_CITY final_df["LCA_CASE_EMPLOYER_CITY"] = final_df["LCA_CASE_EMPLOYER_CITY"].replace( {"[^a-zA-Z0-9]": ""}, regex=True ) # ### find the sectors of the company using the NAICS code # Convert the LCA_CASE_NAICS_CODE column to string data type final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].astype(str) # Extract the first two digits of each string value final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].str[:2] naics_unique_values = final_df["LCA_CASE_NAICS_CODE"].unique() # reading the NAICS_data to cross check and create a new column for employer sector NAICS_data = pd.read_csv("NAICS_data.csv") NAICS_data.head() # loop through all the NAICS in the naics_unique_values for i in naics_unique_values: try: # assuming your dataframe is called 'df' NAICS_data_code = NAICS_data.loc[ NAICS_data["NAICS_CODE"] == i, "NAICS_TITLE" ].iloc[0] except: # if there is no index with the particular soc code the occupation name will be null NAICS_data_code = "Unknown" # create a boolean mask for the conditions mask = final_df["LCA_CASE_NAICS_CODE"] == i # update the LCA_CASE_SOC_NAME column for the filtered rows final_df.loc[mask, "EMPLOYER_SECTOR"] = NAICS_data_code # 
extract the year component from the datetime column LCA_CASE_SUBMIT and store it in a new column year final_df["year"] = final_df["LCA_CASE_SUBMIT"].dt.year # # Preprocessing # drop duplicates final_df = final_df.drop_duplicates() # remove numbers after "." period in 'LCA_CASE_SOC_CODE' column final_df["LCA_CASE_SOC_CODE"] = ( final_df["LCA_CASE_SOC_CODE"].astype(str).apply(lambda x: x.split(".")[0]) ) # function to correct the LCA_CASE_SOC_CODE def preprocess_column(column): pattern = r"^\d{2}-\d{4}$" # regex pattern for "XX-XXXX" format def preprocess_value(value): if ("-" not in value) and len(value) < 6: cleaned_value = np.nan elif "-" in value: value = value.replace("-", "") cleaned_value = value[0:2] + "-" + value[2:6] if len(cleaned_value) != 7: cleaned_value = np.nan elif ("-" not in value) and len(value) > 5: value = value.replace("/", "") cleaned_value = value[0:2] + "-" + value[2:6] return cleaned_value cleaned_column = column.apply( lambda x: np.nan if pd.isna(x) else (x if re.search(pattern, str(x)) else preprocess_value(x)) ) return cleaned_column final_df["LCA_CASE_SOC_CODE"] = preprocess_column(final_df["LCA_CASE_SOC_CODE"]) final_df.head() # #### preprocessing LCA_CASE_SOC_CODE column # Replace the values in the 'LCA_CASE_WAGE_RATE_FROM' column # define a custom function to preprocess the wage_rate column def preprocess_wage_rate(cell_value): if isinstance(cell_value, float): return cell_value elif "-" in cell_value: return cell_value.split("-")[0].strip() else: return cell_value # apply the custom function to the wage_rate column final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply( lambda x: preprocess_wage_rate(x) ) final_df.head() for i in final_df.index: if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Month": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 12 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Week": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 52 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Bi-Weekly": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 26 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Hour": if final_df.loc[i, "FULL_TIME_POS"] == "N": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 35 * 52 ) else: final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 40 * 52 ) final_df.LCA_CASE_WAGE_RATE_UNIT.replace( ["Bi-Weekly", "Month", "Week", "Hour"], ["Year", "Year", "Year", "Year"], inplace=True, ) # #### scraping data for SOC name from website # initialize webdriver driver = webdriver.Chrome() # navigate to webpage driver.get("https://www.bls.gov/oes/current/oes_stru.htm#29-0000") # find all li elements li_elements = driver.find_elements("xpath", "//li") # create empty list to store data data = [] # loop through li elements for li in li_elements: text = li.text if "-" in text: # use regular expression to extract SOC code and occupation name words = text.split() soc = words[0] name = (" ".join(words[1::])).replace('"', "").strip() name_list = words[1::] if "-" in name: for i, word in enumerate(name_list): if ("-" in word) and (len(word) > 1): name = (" ".join(name_list[:i])).replace('"', "").strip() break data.append({"SOC Code": soc, "Occupation Name": name}) # close webdriver driver.quit() # create dataframe occupation_data = pd.DataFrame(data) # save dataframe as CSV 
occupation_data.to_csv("occupations.csv", index=False) final_df.isna().sum() # reading the occupation data to impute the missing soc_name occupation_data = pd.read_csv("occupations.csv") occupation_data # ### treating the null values # #### imputing soc name nan values by refering to scraped data # create a separate dataframe with NaN values nan_df = final_df[final_df["LCA_CASE_SOC_NAME"].isna()] mask = final_df["LCA_CASE_SOC_NAME"].isna() df_nan = final_df[mask] unique_null_soc_code = list(df_nan["LCA_CASE_SOC_CODE"].unique()) print(unique_null_soc_code) unique_null_soc_code = [x for x in unique_null_soc_code if type(x) != float] print(unique_null_soc_code) # loop through all the SOC code in unique_null_soc_code list for i in unique_null_soc_code: try: # assuming your dataframe is called 'df' occupation_name = occupation_data.loc[ occupation_data["SOC Code"] == i, "Occupation Name" ].iloc[0] except: # if there is no index with the particular soc code the occupation name will be null occupation_name = np.nan # create a boolean mask for the conditions mask = (final_df["LCA_CASE_SOC_NAME"].isna()) & (final_df["LCA_CASE_SOC_CODE"] == i) # update the LCA_CASE_SOC_NAME column for the filtered rows final_df.loc[mask, "LCA_CASE_SOC_NAME"] = occupation_name final_df.isna().sum() # #### replacing other NaN values in the other columns final_df["LCA_CASE_EMPLOYER_NAME"].fillna("Unknown", inplace=True) final_df["LCA_CASE_EMPLOYER_STATE"].fillna("Unknown", inplace=True) final_df["LCA_CASE_EMPLOYER_CITY"].fillna("Unknown", inplace=True) final_df["LCA_CASE_SOC_CODE"].fillna("Unknown", inplace=True) final_df["LCA_CASE_SOC_NAME"].fillna("Unknown", inplace=True) final_df["LCA_CASE_WAGE_RATE_FROM"].fillna(0, inplace=True) # ### dropping unwanted columns final_df.columns final_df = final_df.drop( [ "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ], axis=1, ) final_df.columns # replacing INVALIDATED and REJECTED as the DENIED final_df.loc[(final_df["STATUS"] == "INVALIDATED"), "STATUS"] = "DENIED" final_df.loc[(final_df["STATUS"] == "REJECTED"), "STATUS"] = "DENIED" # #### preprocessing LCA_CASE_SOC_NAME column # Convert SOC names to lowercase final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.lower() # remove the s from the words final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.rstrip("s") # #### preprocessing LCA_CASE_WAGE_RATE_FROM column final_df["LCA_CASE_WAGE_RATE_FROM"] final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].replace( "Unknown", 0 ) final_df.head() # converting the column to numeric then fill nan values by 0 final_df["LCA_CASE_WAGE_RATE_FROM"] = pd.to_numeric( final_df["LCA_CASE_WAGE_RATE_FROM"], errors="coerce" ).fillna(0) # Divide wages by 1000 final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"] / 1000 # converting column type to int final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].astype(int) final_df["LCA_CASE_WAGE_RATE_FROM"].describe() final_df = final_df.loc[final_df["LCA_CASE_WAGE_RATE_FROM"] > 0] # Determine the 0.1 and 0.99 quantiles to remove the outliers q1 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.1) q99 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.99) # Filter the DataFrame to remove any values outside of the 0.1 and 0.99 quantiles final_df = final_df.loc[ (final_df["LCA_CASE_WAGE_RATE_FROM"] >= q1) & (final_df["LCA_CASE_WAGE_RATE_FROM"] <= q99) ] 
final_df["LCA_CASE_WAGE_RATE_FROM"].describe() final_df["LCA_CASE_WAGE_RATE_FROM"].describe() # ## ---------------------------------------------------------------------------------------- # # --------------------------------Analysis----------------------------------- # ## What is the total number of H-1B visa applications and what is the growth rate of the applications over the past three years? # Calculate number of applications and growth rate per year year_count = ( final_df.loc[(final_df["year"] >= 2013)]["year"] .value_counts() .reset_index() .rename(columns={"index": "year", "year": "count"}) ) year_count = year_count.sort_values("year") year_count["growth_rate"] = year_count["count"].pct_change() * 100 # Create bar chart and line chart for growth rate fig = go.Figure() fig.add_trace( go.Bar(x=year_count["year"], y=year_count["count"], name="Number of Applications") ) fig.add_trace( go.Scatter( x=year_count["year"], y=year_count["growth_rate"], name="Growth Rate", yaxis="y2", ) ) # Set axis titles and layout fig.update_layout( title="Number of Applications and Growth Rate per Year", xaxis_title="Year", yaxis_title="Number of Applications", yaxis2=dict(side="right", overlaying="y", title="Growth Rate (%)"), ) fig.update_xaxes(tickvals=["2013", "2014", "2015", "2016"]) # Add growth rate as text to each bar for i in range(len(year_count)): fig.add_annotation( x=year_count["year"][i], y=year_count["count"][i], text=f"{year_count['growth_rate'][i]:.1f}%", showarrow=False, font=dict(size=10), yshift=5, ) # Show the chart fig.show() # # ##### The analysis shows that there was a significant increase in the number of applications between 2014 and 2015, with a growth rate of 17.7%. However, the application rate saw a sudden drop of 9% in 2016. Further investigation is needed to determine if this drop is due to an increase in rejection rates or other factors. # ## ---------------------------------------------------------------------------------------- # ## What caused the sudden drop in the application rate? Is it due to an increase in rejection rates, or were other factors contributing to this decline? # Group the data by year and status to get the total count df_grouped = ( final_df.loc[(final_df["year"] >= 2014)] .groupby(["year", "STATUS"]) .count()["LCA_CASE_NUMBER"] .reset_index() ) # Calculate the total count for each year df_year_count = df_grouped.groupby("year").sum()["LCA_CASE_NUMBER"].reset_index() # Add a column to the grouped dataframe with the percentage of each stack df_grouped["percentage"] = df_grouped.apply( lambda row: str( round( row["LCA_CASE_NUMBER"] / df_year_count[df_year_count["year"] == row["year"]][ "LCA_CASE_NUMBER" ].values[0] * 100, 2, ) ) + "%", axis=1, ) # Create the stacked bar chart fig = go.Figure() for status in df_grouped["STATUS"].unique(): df_filtered = df_grouped[df_grouped["STATUS"] == status] fig.add_trace( go.Bar( y=df_filtered["year"], x=df_filtered["LCA_CASE_NUMBER"], name=status, text=df_filtered["percentage"], textposition="auto", orientation="h", marker_color=px.colors.qualitative.Plotly[len(fig.data)], ) ) # Set axis titles and layout fig.update_layout( title="Total count by year and status", yaxis_title="Year", xaxis_title="Total count", barmode="stack", ) fig.update_yaxes(tickvals=["2014", "2015", "2016"]) # Show the chart fig.show() # ##### According to the H1B visa data analysis, it has been observed that the rejection rate for the visa has decreased significantly from 5.41% to 3.4% over the years. 
On the other hand, the acceptance rate has been steadily increasing every year. This could be an indication of the US government's more favorable policies towards H1B visa applications, resulting in a higher acceptance rate. It may also suggest that employers have become more adept at submitting strong applications, thereby reducing the rejection rate. # ## ---------------------------------------------------------------------------------------- # ## What are the top sectors for H1B visa applications? # Group the data by employer sector to get the count of each sector df_grouped = final_df.groupby("EMPLOYER_SECTOR").size().reset_index(name="count") # Create the pie chart fig = px.pie( df_grouped, values="count", names="EMPLOYER_SECTOR", title="Employer sector distribution", hole=0.5, ) # Show the chart fig.show() # ##### Based on our analysis, we have found that a significant proportion of H1B visa applications, approximately 72.4%, were related to the professional, scientific, and technical services sector, which includes fields such as computer programming, scientific research, engineering, and consulting services. This high number of applications can be attributed to the high demand for skilled professionals in these fields, as they require specialized expertise and knowledge. # ##### Moreover, it is also possible that larger companies have been contributing to this trend by sponsoring more H1B visas for their employees, particularly in the professional, scientific, and technical services sector. This may be due to the fact that these companies require highly skilled workers to maintain their competitive edge and growth in the industry. # ##### Further analysis is needed to investigate whether the concentration of H1B visa applications in the professional, scientific, and technical services sector is due to other factors such as pay scales, availability of skilled labor, or any regulatory changes affecting the industry. # ## ---------------------------------------------------------------------------------------- # ## Which are the top 10 employers with the highest number of H1B visa applications, and in which sectors do they belong? 
import plotly.graph_objects as go import pandas as pd # Group the data by employer name and status to get the total count df_grouped = ( final_df.groupby(["LCA_CASE_EMPLOYER_NAME", "STATUS", "EMPLOYER_SECTOR"]) .count()["LCA_CASE_NUMBER"] .reset_index() ) # Get the top 10 employers based on application count top_employers = ( df_grouped.groupby("LCA_CASE_EMPLOYER_NAME") .sum() .sort_values("LCA_CASE_NUMBER", ascending=False) .head(10) .reset_index()["LCA_CASE_EMPLOYER_NAME"] .to_list() ) # Filter the data for top 10 employers df_top_employers = df_grouped[df_grouped["LCA_CASE_EMPLOYER_NAME"].isin(top_employers)] # Create the stacked bar chart fig = go.Figure() for status in df_top_employers["STATUS"].unique(): df_filtered = df_top_employers[df_top_employers["STATUS"] == status] fig.add_trace( go.Bar( x=df_filtered["LCA_CASE_NUMBER"], y=df_filtered["LCA_CASE_EMPLOYER_NAME"], name=status, orientation="h", marker_color=px.colors.qualitative.T10[len(fig.data)], text=df_filtered["EMPLOYER_SECTOR"], textposition="inside", ) ) # Set axis titles and layout fig.update_layout( title="Top 10 Employers by Total Application Count", xaxis_title="Total Application Count", yaxis_title="Employer Name", barmode="stack", yaxis={"categoryorder": "total ascending"}, ) # Change color palette fig.update_traces(marker=dict(line=dict(color="yellow", width=0.5))) # Show the chart fig.show() # ##### Based on the analysis, it is found that 9 out of the top 10 employers with the highest number of H1B visa applications belong to the professional, scientific, and technical services sector. This sector is known to have a high demand for skilled professionals, and it includes fields such as computer programming, scientific research, engineering, and consulting services. # ##### It is interesting to note that Infosys has the highest number of approved applications with 82,271 and the least number of denied applications among the top 10 employers. Furthermore, Infosys has played a significant role in the H1B visa application count, surpassing the second-ranked TCS and the third-ranked Wipro combined. This raises the question of what strategies Infosys might have implemented to achieve this level of success and what type of roles they are recruiting for. # ## ---------------------------------------------------------------------------------------- # ## How much of an impact do the top employers have on the distribution of job positions for H1B visas? # Create a list of top employers top_employers = [ "INFOSYS LIMITED", "TATA CONSULTANCY SERVICES LIMITED", "WIPRO LIMITED", "IBM INDIA PRIVATE LIMITED", "ACCENTURE LLP", "DELOITTE CONSULTING LLP", "CAPGEMINI AMERICA INC", "HCL AMERICA, INC.", "MICROSOFT CORPORATION", "ERNST & YOUNG U.S. 
LLP", ] # Create a new column in final_df indicating whether the employer is in the top_employers list or not final_df["EMPLOYER_GROUP"] = final_df["LCA_CASE_EMPLOYER_NAME"].apply( lambda x: x if x in top_employers else "Other Employers" ) # Group by LCA_CASE_SOC_NAME and LCA_CASE_EMPLOYER_NAME and get the count df_grouped = ( final_df.groupby(["LCA_CASE_SOC_NAME", "EMPLOYER_GROUP"]) .size() .reset_index(name="count") ) # Filter out the top 10 LCA_CASE_SOC_NAME df_top10 = ( df_grouped.groupby("LCA_CASE_SOC_NAME") .sum() .reset_index() .sort_values(by="count", ascending=False) .head(10)["LCA_CASE_SOC_NAME"] .tolist() ) df_filtered = df_grouped[df_grouped["LCA_CASE_SOC_NAME"].isin(df_top10)] # Create the stacked bar chart fig = px.bar( df_filtered, x="count", y="LCA_CASE_SOC_NAME", color="EMPLOYER_GROUP", orientation="h", ) # Add axis labels and title fig.update_layout( title="Top 10 LCA_CASE_SOC_NAME with employer group", xaxis_title="Count", yaxis_title="LCA_CASE_SOC_NAME", ) fig.update_layout(yaxis={"categoryorder": "total ascending"}) # Show the chart fig.show() # ##### Upon analyzing the data, a chart was created to visualize the contribution of the top 10 H1B visa sponsoring employers to the top 10 job positions. The remaining employers were grouped as "other employers." The chart reveals that even though "other employers" occupy a significant portion of the chart, the top 10 employers have made a substantial contribution to the top 10 job positions. # ##### For instance, Infosys has made a significant contribution to the computer systems analyst position, while Microsoft has made a significant contribution to the Software developers, application position. Similarly, IBM has made a considerable contribution to the computer programmer, applications position. # ##### The chart also suggests that the top 10 employers have a significant impact on the H1B visa application process and the job positions that are filled. # ## ---------------------------------------------------------------------------------------- # ## To what extent does the salary range affect the approval or denial of H1B visa applications for the job positions!?!? 
# Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_WAGE_RATE_FROM"], yaxis="y2", name="Average Wage Rate in thousands", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average wage rate", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"), ) # Show the chart fig.show() # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_WAGE_RATE_FROM"], yaxis="y2", name="Average Wage Rate in thousands", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average wage rate", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"), ) # Show the chart fig.show() # calculate the quantiles q1, q2, q3, q4 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile([0.25, 0.5, 0.75, 0.99]) # define the label function def label_wage_range(x): if x > 0 and x <= q1: return "Below Q1 (64K)" elif x > q1 and x <= q2: return "Q1 (64K - 75K)" elif x > q2 and x <= q3: return "Q2 (75K - 95K)" elif x > q3 and x <= q4: return "Q3 (95K - 180K)" elif x > q4: return "Above Q4 (180K)" # create the new column using apply() and the label function final_df["wage_range"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply(label_wage_range) df_filtered = final_df # Create a grouped bar chart using the quantile column and the STATUS column as color fig = px.histogram(df_filtered, x="wage_range", color="STATUS", barmode="group") # Set the layout for the chart fig.update_layout( title="Distribution of Wage Rates by Status and Quantile", xaxis_title="Quantile", yaxis_title="Count", ) # Show the chart fig.show() # ##### Upon analyzing the data, no significant correlation was observed between the salary range and the application status. 
After analyzing the data for the top 10 accepted and denied job positions, there was no significant correlation observed between salary range and application status. Interestingly, both the top 10 accepted and denied job positions had the same salary range. # ##### To further investigate the relationship between salary range and application status, the salary range was split into four quantiles. After analyzing the data for low, average, above average, and higher pay salary levels, it was found that the majority of H1B visa applications that were approved fell into the low (Q1) and average (Q2) salary range categories. However, there were no clear trends observed between the salary range and the application status, suggesting that factors other than salary may have played a more significant role in determining the outcome of H1B visa applications. # ## ---------------------------------------------------------------------------------------- # ## How does the length of employment impact the decision of the H1B visa application? # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"], yaxis="y2", name="Average Employment period in months", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average employment period", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average employment period", overlaying="y", side="right"), ) # Show the chart fig.show() # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_EMPLOYMENT_PERIOD df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_EMPLOYMENT_PERIOD fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"], yaxis="y2", name="Average Employment Period in months", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average employment period", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict( title="Average Employment Period in months", overlaying="y", side="right" ), ) # Show the chart fig.show() mean = 
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].mean()


# define the label function
def label_employment_range(x):
    if x < mean:
        return "Below average (34 months)"
    else:
        return "Above average (34 months)"


# create the new column using apply() and the label function
final_df["Employment_range"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].apply(
    label_employment_range
)
df_filtered = final_df
# Create a grouped bar chart using the employment range column and the STATUS column as color
fig = px.histogram(df_filtered, x="Employment_range", color="STATUS", barmode="group")
# Set the layout for the chart
fig.update_layout(
    title="Distribution of Employment_range by Status and Mean of Employment period",
    xaxis_title="Employment range",
    yaxis_title="Count",
)
# Show the chart
fig.show()

# ##### After analyzing the data, it was found that the employment period did not show any significant correlation with the visa decision either. Both the top approved and the top denied job positions had an average employment period of about 33 months.
# ##### Upon further investigation, the applications were split into groups below and above the mean employment period. It was observed that only a few applications had an employment period below the mean. This suggests that the employment period may not have played a significant role in determining the outcome of H1B visa applications. Other factors, such as the applicant's qualifications and the employer's sponsorship, may have had a greater impact on the decision.
# ## ----------------------------------------------------------------------------------------
# ## Are there any trends or patterns in the geographic distribution of H1B visa workers?
# Group by state and get the count
df_state = (
    final_df.groupby("LCA_CASE_EMPLOYER_STATE")["LCA_CASE_NUMBER"].count().reset_index()
)
# Create the choropleth map
fig = px.choropleth(
    df_state,
    locations="LCA_CASE_EMPLOYER_STATE",
    locationmode="USA-states",
    color="LCA_CASE_NUMBER",
    scope="usa",
    color_continuous_scale="Blues",
    title="H1B Visa Applications by State",
)
fig.show()
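# ##### (Sketch, not part of the original analysis.) The choropleth above encodes counts as colour intensity; ranking the same grouped frame makes the leading states easier to read off directly.
# Rank states by application count, reusing the df_state frame built above
df_top_states = df_state.sort_values("LCA_CASE_NUMBER", ascending=False).head(10)
fig = px.bar(
    df_top_states,
    x="LCA_CASE_EMPLOYER_STATE",
    y="LCA_CASE_NUMBER",
    title="Top 10 States by H1B Visa Applications (illustrative)",
)
fig.show()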
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/627/129627997.ipynb
null
null
[{"Id": 129627997, "ScriptId": 38546957, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10264085, "CreationDate": "05/15/2023 10:37:50", "VersionNumber": 1.0, "Title": "H1B-Visa", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 802.0, "LinesInsertedFromPrevious": 802.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd import numpy as np from selenium import webdriver import re import plotly.graph_objs as go import plotly.express as px pd.options.mode.chained_assignment = None data_one = pd.read_csv("H-1B_2014.csv") data_two = pd.read_csv("H-1B_2015.csv") data_three = pd.read_csv("H-1B_2016.csv") columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] # renaming the columns data_two = data_two.rename( columns={ "CASE_NUMBER": "LCA_CASE_NUMBER", "CASE_STATUS": "STATUS", "CASE_SUBMITTED": "LCA_CASE_SUBMIT", "DECISION_DATE": "DECISION_DATE", "EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE", "EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE", "EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME", "EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE", "EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY", "SOC_CODE": "LCA_CASE_SOC_CODE", "SOC_NAME": "LCA_CASE_SOC_NAME", "JOB_TITLE": "LCA_CASE_JOB_TITLE", "WAGE_RATE_OF_PAY": "LCA_CASE_WAGE_RATE_FROM", "WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POSITION": "FULL_TIME_POS", "NAIC_CODE": "LCA_CASE_NAICS_CODE", } ) columns = [ "LCA_CASE_NUMBER", "STATUS", "LCA_CASE_SUBMIT", "DECISION_DATE", "VISA_CLASS", "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_EMPLOYER_NAME", "LCA_CASE_EMPLOYER_STATE", "LCA_CASE_EMPLOYER_CITY", "LCA_CASE_SOC_CODE", "LCA_CASE_SOC_NAME", "LCA_CASE_JOB_TITLE", "LCA_CASE_WAGE_RATE_FROM", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ] # renaming the columns data_three = data_three.rename( columns={ "CASE_NUMBER": "LCA_CASE_NUMBER", "CASE_STATUS": "STATUS", "CASE_SUBMITTED": "LCA_CASE_SUBMIT", "DECISION_DATE": "DECISION_DATE", "EMPLOYMENT_START_DATE": "LCA_CASE_EMPLOYMENT_START_DATE", "EMPLOYMENT_END_DATE": "LCA_CASE_EMPLOYMENT_END_DATE", "EMPLOYER_NAME": "LCA_CASE_EMPLOYER_NAME", "EMPLOYER_STATE": "LCA_CASE_EMPLOYER_STATE", "EMPLOYER_CITY": "LCA_CASE_EMPLOYER_CITY", "SOC_CODE": "LCA_CASE_SOC_CODE", "SOC_NAME": "LCA_CASE_SOC_NAME", "JOB_TITLE": "LCA_CASE_JOB_TITLE", "WAGE_RATE_OF_PAY_FROM": "LCA_CASE_WAGE_RATE_FROM", "WAGE_UNIT_OF_PAY": "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POSITION": "FULL_TIME_POS", "NAIC_CODE": "LCA_CASE_NAICS_CODE", } ) # concat the three dataframes final_df = pd.concat( [data_one[columns], data_two[columns], data_three[columns]] ).reset_index(drop=True) final_df.head() final_df.columns final_df.info() (final_df.isnull().sum()) final_df.head() # ## Feature engineering # ### creating employment period column # convert date columns to datetime format and assign nan to invalid data final_df["LCA_CASE_EMPLOYMENT_START_DATE"] = pd.to_datetime( final_df["LCA_CASE_EMPLOYMENT_START_DATE"], errors="coerce" ) final_df["LCA_CASE_EMPLOYMENT_END_DATE"] = pd.to_datetime( final_df["LCA_CASE_EMPLOYMENT_END_DATE"], errors="coerce" ) # subtract the 
LCA_CASE_EMPLOYMENT_END_DATE from LCA_CASE_EMPLOYMENT_START_DATEto find employment period LCA_CASE_EMPLOYMENT_PERIOD = ( final_df["LCA_CASE_EMPLOYMENT_END_DATE"] - final_df["LCA_CASE_EMPLOYMENT_START_DATE"] ) # create a new column with LCA_CASE_EMPLOYMENT_PERIOD value final_df.insert(7, "LCA_CASE_EMPLOYMENT_PERIOD", LCA_CASE_EMPLOYMENT_PERIOD) final_df.head() # converting LCA_CASE_EMPLOYMENT_PERIOD into days format final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].dt.days final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # delete the outlier value, i.e employment days less than 0. final_df = final_df[final_df["LCA_CASE_EMPLOYMENT_PERIOD"] > 0] final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # - The value of 30.44 is derived by calculating the average number of days in a month over a period of four years. # - The average number of days in a year is 365.24 (due to leap years), so dividing by 12 gives an average of 30.44 days per month. This is a commonly used approximation in calculations involving months and days. # - Using 30.44 as the number of days in a month provides a more accurate estimate when converting between months and days, rather than assuming that each month has exactly 30 or 31 days. # # the employment period is converted into months final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = round( final_df["LCA_CASE_EMPLOYMENT_PERIOD"] / 30.44 ) # filled the missing value with 0 and converted the column type to int final_df["LCA_CASE_EMPLOYMENT_PERIOD"] = ( final_df["LCA_CASE_EMPLOYMENT_PERIOD"].fillna(0).astype(int) ) # ### creating visa decision duration column # convert date columns to datetime format and assign nan to invalid data final_df["LCA_CASE_SUBMIT"] = pd.to_datetime( final_df["LCA_CASE_SUBMIT"], errors="coerce" ) final_df["DECISION_DATE"] = pd.to_datetime(final_df["DECISION_DATE"], errors="coerce") # subtract the LCA_CASE_SUBMIT from DECISION_DATE to find visa decision period LCA_CASE_DECISION_PERIOD = final_df["DECISION_DATE"] - final_df["LCA_CASE_SUBMIT"] # create a new column with LCA_CASE_DECISION_PERIOD value final_df.insert(4, "LCA_CASE_DECISION_PERIOD", LCA_CASE_DECISION_PERIOD) final_df["LCA_CASE_DECISION_PERIOD"] = final_df["LCA_CASE_DECISION_PERIOD"].dt.days final_df["LCA_CASE_EMPLOYMENT_PERIOD"].describe() # remove special characters from LCA_CASE_EMPLOYER_CITY final_df["LCA_CASE_EMPLOYER_CITY"] = final_df["LCA_CASE_EMPLOYER_CITY"].replace( {"[^a-zA-Z0-9]": ""}, regex=True ) # ### find the sectors of the company using the NAICS code # Convert the LCA_CASE_NAICS_CODE column to string data type final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].astype(str) # Extract the first two digits of each string value final_df["LCA_CASE_NAICS_CODE"] = final_df["LCA_CASE_NAICS_CODE"].str[:2] naics_unique_values = final_df["LCA_CASE_NAICS_CODE"].unique() # reading the NAICS_data to cross check and create a new column for employer sector NAICS_data = pd.read_csv("NAICS_data.csv") NAICS_data.head() # loop through all the NAICS in the naics_unique_values for i in naics_unique_values: try: # assuming your dataframe is called 'df' NAICS_data_code = NAICS_data.loc[ NAICS_data["NAICS_CODE"] == i, "NAICS_TITLE" ].iloc[0] except: # if there is no index with the particular soc code the occupation name will be null NAICS_data_code = "Unknown" # create a boolean mask for the conditions mask = final_df["LCA_CASE_NAICS_CODE"] == i # update the LCA_CASE_SOC_NAME column for the filtered rows final_df.loc[mask, "EMPLOYER_SECTOR"] = NAICS_data_code # 
extract the year component from the datetime column LCA_CASE_SUBMIT and store it in a new column year final_df["year"] = final_df["LCA_CASE_SUBMIT"].dt.year # # Preprocessing # drop duplicates final_df = final_df.drop_duplicates() # remove numbers after "." period in 'LCA_CASE_SOC_CODE' column final_df["LCA_CASE_SOC_CODE"] = ( final_df["LCA_CASE_SOC_CODE"].astype(str).apply(lambda x: x.split(".")[0]) ) # function to correct the LCA_CASE_SOC_CODE def preprocess_column(column): pattern = r"^\d{2}-\d{4}$" # regex pattern for "XX-XXXX" format def preprocess_value(value): if ("-" not in value) and len(value) < 6: cleaned_value = np.nan elif "-" in value: value = value.replace("-", "") cleaned_value = value[0:2] + "-" + value[2:6] if len(cleaned_value) != 7: cleaned_value = np.nan elif ("-" not in value) and len(value) > 5: value = value.replace("/", "") cleaned_value = value[0:2] + "-" + value[2:6] return cleaned_value cleaned_column = column.apply( lambda x: np.nan if pd.isna(x) else (x if re.search(pattern, str(x)) else preprocess_value(x)) ) return cleaned_column final_df["LCA_CASE_SOC_CODE"] = preprocess_column(final_df["LCA_CASE_SOC_CODE"]) final_df.head() # #### preprocessing LCA_CASE_SOC_CODE column # Replace the values in the 'LCA_CASE_WAGE_RATE_FROM' column # define a custom function to preprocess the wage_rate column def preprocess_wage_rate(cell_value): if isinstance(cell_value, float): return cell_value elif "-" in cell_value: return cell_value.split("-")[0].strip() else: return cell_value # apply the custom function to the wage_rate column final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply( lambda x: preprocess_wage_rate(x) ) final_df.head() for i in final_df.index: if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Month": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 12 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Week": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 52 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Bi-Weekly": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 26 ) if final_df.loc[i, "LCA_CASE_WAGE_RATE_UNIT"] == "Hour": if final_df.loc[i, "FULL_TIME_POS"] == "N": final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 35 * 52 ) else: final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] = ( final_df.loc[i, "LCA_CASE_WAGE_RATE_FROM"] * 40 * 52 ) final_df.LCA_CASE_WAGE_RATE_UNIT.replace( ["Bi-Weekly", "Month", "Week", "Hour"], ["Year", "Year", "Year", "Year"], inplace=True, ) # #### scraping data for SOC name from website # initialize webdriver driver = webdriver.Chrome() # navigate to webpage driver.get("https://www.bls.gov/oes/current/oes_stru.htm#29-0000") # find all li elements li_elements = driver.find_elements("xpath", "//li") # create empty list to store data data = [] # loop through li elements for li in li_elements: text = li.text if "-" in text: # use regular expression to extract SOC code and occupation name words = text.split() soc = words[0] name = (" ".join(words[1::])).replace('"', "").strip() name_list = words[1::] if "-" in name: for i, word in enumerate(name_list): if ("-" in word) and (len(word) > 1): name = (" ".join(name_list[:i])).replace('"', "").strip() break data.append({"SOC Code": soc, "Occupation Name": name}) # close webdriver driver.quit() # create dataframe occupation_data = pd.DataFrame(data) # save dataframe as CSV 
occupation_data.to_csv("occupations.csv", index=False) final_df.isna().sum() # reading the occupation data to impute the missing soc_name occupation_data = pd.read_csv("occupations.csv") occupation_data # ### treating the null values # #### imputing soc name nan values by refering to scraped data # create a separate dataframe with NaN values nan_df = final_df[final_df["LCA_CASE_SOC_NAME"].isna()] mask = final_df["LCA_CASE_SOC_NAME"].isna() df_nan = final_df[mask] unique_null_soc_code = list(df_nan["LCA_CASE_SOC_CODE"].unique()) print(unique_null_soc_code) unique_null_soc_code = [x for x in unique_null_soc_code if type(x) != float] print(unique_null_soc_code) # loop through all the SOC code in unique_null_soc_code list for i in unique_null_soc_code: try: # assuming your dataframe is called 'df' occupation_name = occupation_data.loc[ occupation_data["SOC Code"] == i, "Occupation Name" ].iloc[0] except: # if there is no index with the particular soc code the occupation name will be null occupation_name = np.nan # create a boolean mask for the conditions mask = (final_df["LCA_CASE_SOC_NAME"].isna()) & (final_df["LCA_CASE_SOC_CODE"] == i) # update the LCA_CASE_SOC_NAME column for the filtered rows final_df.loc[mask, "LCA_CASE_SOC_NAME"] = occupation_name final_df.isna().sum() # #### replacing other NaN values in the other columns final_df["LCA_CASE_EMPLOYER_NAME"].fillna("Unknown", inplace=True) final_df["LCA_CASE_EMPLOYER_STATE"].fillna("Unknown", inplace=True) final_df["LCA_CASE_EMPLOYER_CITY"].fillna("Unknown", inplace=True) final_df["LCA_CASE_SOC_CODE"].fillna("Unknown", inplace=True) final_df["LCA_CASE_SOC_NAME"].fillna("Unknown", inplace=True) final_df["LCA_CASE_WAGE_RATE_FROM"].fillna(0, inplace=True) # ### dropping unwanted columns final_df.columns final_df = final_df.drop( [ "LCA_CASE_EMPLOYMENT_START_DATE", "LCA_CASE_EMPLOYMENT_END_DATE", "LCA_CASE_WAGE_RATE_UNIT", "FULL_TIME_POS", "LCA_CASE_NAICS_CODE", ], axis=1, ) final_df.columns # replacing INVALIDATED and REJECTED as the DENIED final_df.loc[(final_df["STATUS"] == "INVALIDATED"), "STATUS"] = "DENIED" final_df.loc[(final_df["STATUS"] == "REJECTED"), "STATUS"] = "DENIED" # #### preprocessing LCA_CASE_SOC_NAME column # Convert SOC names to lowercase final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.lower() # remove the s from the words final_df["LCA_CASE_SOC_NAME"] = final_df["LCA_CASE_SOC_NAME"].str.rstrip("s") # #### preprocessing LCA_CASE_WAGE_RATE_FROM column final_df["LCA_CASE_WAGE_RATE_FROM"] final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].replace( "Unknown", 0 ) final_df.head() # converting the column to numeric then fill nan values by 0 final_df["LCA_CASE_WAGE_RATE_FROM"] = pd.to_numeric( final_df["LCA_CASE_WAGE_RATE_FROM"], errors="coerce" ).fillna(0) # Divide wages by 1000 final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"] / 1000 # converting column type to int final_df["LCA_CASE_WAGE_RATE_FROM"] = final_df["LCA_CASE_WAGE_RATE_FROM"].astype(int) final_df["LCA_CASE_WAGE_RATE_FROM"].describe() final_df = final_df.loc[final_df["LCA_CASE_WAGE_RATE_FROM"] > 0] # Determine the 0.1 and 0.99 quantiles to remove the outliers q1 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.1) q99 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile(0.99) # Filter the DataFrame to remove any values outside of the 0.1 and 0.99 quantiles final_df = final_df.loc[ (final_df["LCA_CASE_WAGE_RATE_FROM"] >= q1) & (final_df["LCA_CASE_WAGE_RATE_FROM"] <= q99) ] 
final_df["LCA_CASE_WAGE_RATE_FROM"].describe() final_df["LCA_CASE_WAGE_RATE_FROM"].describe() # ## ---------------------------------------------------------------------------------------- # # --------------------------------Analysis----------------------------------- # ## What is the total number of H-1B visa applications and what is the growth rate of the applications over the past three years? # Calculate number of applications and growth rate per year year_count = ( final_df.loc[(final_df["year"] >= 2013)]["year"] .value_counts() .reset_index() .rename(columns={"index": "year", "year": "count"}) ) year_count = year_count.sort_values("year") year_count["growth_rate"] = year_count["count"].pct_change() * 100 # Create bar chart and line chart for growth rate fig = go.Figure() fig.add_trace( go.Bar(x=year_count["year"], y=year_count["count"], name="Number of Applications") ) fig.add_trace( go.Scatter( x=year_count["year"], y=year_count["growth_rate"], name="Growth Rate", yaxis="y2", ) ) # Set axis titles and layout fig.update_layout( title="Number of Applications and Growth Rate per Year", xaxis_title="Year", yaxis_title="Number of Applications", yaxis2=dict(side="right", overlaying="y", title="Growth Rate (%)"), ) fig.update_xaxes(tickvals=["2013", "2014", "2015", "2016"]) # Add growth rate as text to each bar for i in range(len(year_count)): fig.add_annotation( x=year_count["year"][i], y=year_count["count"][i], text=f"{year_count['growth_rate'][i]:.1f}%", showarrow=False, font=dict(size=10), yshift=5, ) # Show the chart fig.show() # # ##### The analysis shows that there was a significant increase in the number of applications between 2014 and 2015, with a growth rate of 17.7%. However, the application rate saw a sudden drop of 9% in 2016. Further investigation is needed to determine if this drop is due to an increase in rejection rates or other factors. # ## ---------------------------------------------------------------------------------------- # ## What caused the sudden drop in the application rate? Is it due to an increase in rejection rates, or were other factors contributing to this decline? # Group the data by year and status to get the total count df_grouped = ( final_df.loc[(final_df["year"] >= 2014)] .groupby(["year", "STATUS"]) .count()["LCA_CASE_NUMBER"] .reset_index() ) # Calculate the total count for each year df_year_count = df_grouped.groupby("year").sum()["LCA_CASE_NUMBER"].reset_index() # Add a column to the grouped dataframe with the percentage of each stack df_grouped["percentage"] = df_grouped.apply( lambda row: str( round( row["LCA_CASE_NUMBER"] / df_year_count[df_year_count["year"] == row["year"]][ "LCA_CASE_NUMBER" ].values[0] * 100, 2, ) ) + "%", axis=1, ) # Create the stacked bar chart fig = go.Figure() for status in df_grouped["STATUS"].unique(): df_filtered = df_grouped[df_grouped["STATUS"] == status] fig.add_trace( go.Bar( y=df_filtered["year"], x=df_filtered["LCA_CASE_NUMBER"], name=status, text=df_filtered["percentage"], textposition="auto", orientation="h", marker_color=px.colors.qualitative.Plotly[len(fig.data)], ) ) # Set axis titles and layout fig.update_layout( title="Total count by year and status", yaxis_title="Year", xaxis_title="Total count", barmode="stack", ) fig.update_yaxes(tickvals=["2014", "2015", "2016"]) # Show the chart fig.show() # ##### According to the H1B visa data analysis, it has been observed that the rejection rate for the visa has decreased significantly from 5.41% to 3.4% over the years. 
On the other hand, the acceptance rate has been steadily increasing every year. This could be an indication of the US government's more favorable policies towards H1B visa applications, resulting in a higher acceptance rate. It may also suggest that employers have become more adept at submitting strong applications, thereby reducing the rejection rate. # ## ---------------------------------------------------------------------------------------- # ## What are the top sectors for H1B visa applications? # Group the data by employer sector to get the count of each sector df_grouped = final_df.groupby("EMPLOYER_SECTOR").size().reset_index(name="count") # Create the pie chart fig = px.pie( df_grouped, values="count", names="EMPLOYER_SECTOR", title="Employer sector distribution", hole=0.5, ) # Show the chart fig.show() # ##### Based on our analysis, we have found that a significant proportion of H1B visa applications, approximately 72.4%, were related to the professional, scientific, and technical services sector, which includes fields such as computer programming, scientific research, engineering, and consulting services. This high number of applications can be attributed to the high demand for skilled professionals in these fields, as they require specialized expertise and knowledge. # ##### Moreover, it is also possible that larger companies have been contributing to this trend by sponsoring more H1B visas for their employees, particularly in the professional, scientific, and technical services sector. This may be due to the fact that these companies require highly skilled workers to maintain their competitive edge and growth in the industry. # ##### Further analysis is needed to investigate whether the concentration of H1B visa applications in the professional, scientific, and technical services sector is due to other factors such as pay scales, availability of skilled labor, or any regulatory changes affecting the industry. # ## ---------------------------------------------------------------------------------------- # ## Which are the top 10 employers with the highest number of H1B visa applications, and in which sectors do they belong? 
import plotly.graph_objects as go import pandas as pd # Group the data by employer name and status to get the total count df_grouped = ( final_df.groupby(["LCA_CASE_EMPLOYER_NAME", "STATUS", "EMPLOYER_SECTOR"]) .count()["LCA_CASE_NUMBER"] .reset_index() ) # Get the top 10 employers based on application count top_employers = ( df_grouped.groupby("LCA_CASE_EMPLOYER_NAME") .sum() .sort_values("LCA_CASE_NUMBER", ascending=False) .head(10) .reset_index()["LCA_CASE_EMPLOYER_NAME"] .to_list() ) # Filter the data for top 10 employers df_top_employers = df_grouped[df_grouped["LCA_CASE_EMPLOYER_NAME"].isin(top_employers)] # Create the stacked bar chart fig = go.Figure() for status in df_top_employers["STATUS"].unique(): df_filtered = df_top_employers[df_top_employers["STATUS"] == status] fig.add_trace( go.Bar( x=df_filtered["LCA_CASE_NUMBER"], y=df_filtered["LCA_CASE_EMPLOYER_NAME"], name=status, orientation="h", marker_color=px.colors.qualitative.T10[len(fig.data)], text=df_filtered["EMPLOYER_SECTOR"], textposition="inside", ) ) # Set axis titles and layout fig.update_layout( title="Top 10 Employers by Total Application Count", xaxis_title="Total Application Count", yaxis_title="Employer Name", barmode="stack", yaxis={"categoryorder": "total ascending"}, ) # Change color palette fig.update_traces(marker=dict(line=dict(color="yellow", width=0.5))) # Show the chart fig.show() # ##### Based on the analysis, it is found that 9 out of the top 10 employers with the highest number of H1B visa applications belong to the professional, scientific, and technical services sector. This sector is known to have a high demand for skilled professionals, and it includes fields such as computer programming, scientific research, engineering, and consulting services. # ##### It is interesting to note that Infosys has the highest number of approved applications with 82,271 and the least number of denied applications among the top 10 employers. Furthermore, Infosys has played a significant role in the H1B visa application count, surpassing the second-ranked TCS and the third-ranked Wipro combined. This raises the question of what strategies Infosys might have implemented to achieve this level of success and what type of roles they are recruiting for. # ## ---------------------------------------------------------------------------------------- # ## How much of an impact do the top employers have on the distribution of job positions for H1B visas? # Create a list of top employers top_employers = [ "INFOSYS LIMITED", "TATA CONSULTANCY SERVICES LIMITED", "WIPRO LIMITED", "IBM INDIA PRIVATE LIMITED", "ACCENTURE LLP", "DELOITTE CONSULTING LLP", "CAPGEMINI AMERICA INC", "HCL AMERICA, INC.", "MICROSOFT CORPORATION", "ERNST & YOUNG U.S. 
LLP", ] # Create a new column in final_df indicating whether the employer is in the top_employers list or not final_df["EMPLOYER_GROUP"] = final_df["LCA_CASE_EMPLOYER_NAME"].apply( lambda x: x if x in top_employers else "Other Employers" ) # Group by LCA_CASE_SOC_NAME and LCA_CASE_EMPLOYER_NAME and get the count df_grouped = ( final_df.groupby(["LCA_CASE_SOC_NAME", "EMPLOYER_GROUP"]) .size() .reset_index(name="count") ) # Filter out the top 10 LCA_CASE_SOC_NAME df_top10 = ( df_grouped.groupby("LCA_CASE_SOC_NAME") .sum() .reset_index() .sort_values(by="count", ascending=False) .head(10)["LCA_CASE_SOC_NAME"] .tolist() ) df_filtered = df_grouped[df_grouped["LCA_CASE_SOC_NAME"].isin(df_top10)] # Create the stacked bar chart fig = px.bar( df_filtered, x="count", y="LCA_CASE_SOC_NAME", color="EMPLOYER_GROUP", orientation="h", ) # Add axis labels and title fig.update_layout( title="Top 10 LCA_CASE_SOC_NAME with employer group", xaxis_title="Count", yaxis_title="LCA_CASE_SOC_NAME", ) fig.update_layout(yaxis={"categoryorder": "total ascending"}) # Show the chart fig.show() # ##### Upon analyzing the data, a chart was created to visualize the contribution of the top 10 H1B visa sponsoring employers to the top 10 job positions. The remaining employers were grouped as "other employers." The chart reveals that even though "other employers" occupy a significant portion of the chart, the top 10 employers have made a substantial contribution to the top 10 job positions. # ##### For instance, Infosys has made a significant contribution to the computer systems analyst position, while Microsoft has made a significant contribution to the Software developers, application position. Similarly, IBM has made a considerable contribution to the computer programmer, applications position. # ##### The chart also suggests that the top 10 employers have a significant impact on the H1B visa application process and the job positions that are filled. # ## ---------------------------------------------------------------------------------------- # ## To what extent does the salary range affect the approval or denial of H1B visa applications for the job positions!?!? 
# Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_WAGE_RATE_FROM"], yaxis="y2", name="Average Wage Rate in thousands", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average wage rate", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"), ) # Show the chart fig.show() # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_WAGE_RATE_FROM": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_WAGE_RATE_FROM"], yaxis="y2", name="Average Wage Rate in thousands", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average wage rate", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average Wage Rate", overlaying="y", side="right"), ) # Show the chart fig.show() # calculate the quantiles q1, q2, q3, q4 = final_df["LCA_CASE_WAGE_RATE_FROM"].quantile([0.25, 0.5, 0.75, 0.99]) # define the label function def label_wage_range(x): if x > 0 and x <= q1: return "Below Q1 (64K)" elif x > q1 and x <= q2: return "Q1 (64K - 75K)" elif x > q2 and x <= q3: return "Q2 (75K - 95K)" elif x > q3 and x <= q4: return "Q3 (95K - 180K)" elif x > q4: return "Above Q4 (180K)" # create the new column using apply() and the label function final_df["wage_range"] = final_df["LCA_CASE_WAGE_RATE_FROM"].apply(label_wage_range) df_filtered = final_df # Create a grouped bar chart using the quantile column and the STATUS column as color fig = px.histogram(df_filtered, x="wage_range", color="STATUS", barmode="group") # Set the layout for the chart fig.update_layout( title="Distribution of Wage Rates by Status and Quantile", xaxis_title="Quantile", yaxis_title="Count", ) # Show the chart fig.show() # ##### Upon analyzing the data, no significant correlation was observed between the salary range and the application status. 
After analyzing the data for the top 10 accepted and denied job positions, there was no significant correlation observed between salary range and application status. Interestingly, both the top 10 accepted and denied job positions had the same salary range. # ##### To further investigate the relationship between salary range and application status, the salary range was split into four quantiles. After analyzing the data for low, average, above average, and higher pay salary levels, it was found that the majority of H1B visa applications that were approved fell into the low (Q1) and average (Q2) salary range categories. However, there were no clear trends observed between the salary range and the application status, suggesting that factors other than salary may have played a more significant role in determining the outcome of H1B visa applications. # ## ---------------------------------------------------------------------------------------- # ## How does the length of employment impact the decision of the H1B visa application? # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "CERTIFIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_WAGE_RATE_FROM df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_WAGE_RATE_FROM fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"], yaxis="y2", name="Average Employment period in months", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Certified LCA_CASE_SOC_NAME with highest count and average employment period", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict(title="Average employment period", overlaying="y", side="right"), ) # Show the chart fig.show() # Filter the data to only include certified cases df_filtered = final_df.loc[final_df["STATUS"] == "DENIED"] # Group by LCA_CASE_SOC_NAME and get the count and mean of LCA_CASE_EMPLOYMENT_PERIOD df_grouped = ( df_filtered.groupby("LCA_CASE_SOC_NAME") .agg({"LCA_CASE_NUMBER": "count", "LCA_CASE_EMPLOYMENT_PERIOD": "mean"}) .reset_index() ) # Sort by count in descending order and get the top 10 df_top10 = df_grouped.sort_values(by="LCA_CASE_NUMBER", ascending=False).head(10) # Create the bar chart for top 10 LCA_CASE_SOC_NAME fig = px.bar( df_top10, x="LCA_CASE_SOC_NAME", y="LCA_CASE_NUMBER", labels={"LCA_CASE_SOC_NAME": "LCA_CASE_SOC_NAME", "LCA_CASE_NUMBER": "Count"}, ) # Add the scatter chart for mean of LCA_CASE_EMPLOYMENT_PERIOD fig.add_trace( go.Scatter( x=df_top10["LCA_CASE_SOC_NAME"], y=df_top10["LCA_CASE_EMPLOYMENT_PERIOD"], yaxis="y2", name="Average Employment Period in months", marker=dict(color="red"), ) ) # Set the layout for the chart fig.update_layout( title="Top 10 Denied LCA_CASE_SOC_NAME with highest count and average employment period", xaxis_title="LCA_CASE_SOC_NAME", yaxis_title="Count", yaxis2=dict( title="Average Employment Period in months", overlaying="y", side="right" ), ) # Show the chart fig.show() mean = 
final_df["LCA_CASE_EMPLOYMENT_PERIOD"].mean()


# define the label function
def label_employment_range(x):
    if x < mean:
        return "Below average (34 months)"
    else:
        return "Above average (34 months)"


# create the new column using apply() and the label function
final_df["Employment_range"] = final_df["LCA_CASE_EMPLOYMENT_PERIOD"].apply(
    label_employment_range
)
df_filtered = final_df
# Create a grouped bar chart using the employment range column and the STATUS column as color
fig = px.histogram(df_filtered, x="Employment_range", color="STATUS", barmode="group")
# Set the layout for the chart
fig.update_layout(
    title="Distribution of Employment_range by Status and Mean of Employment period",
    xaxis_title="Employment range",
    yaxis_title="Count",
)
# Show the chart
fig.show()

# ##### After analyzing the data, it was found that the employment period did not show any significant correlation with the visa decision either. Both the top approved and the top denied job positions had an average employment period of about 33 months.
# ##### Upon further investigation, the applications were split into groups below and above the mean employment period. It was observed that only a few applications had an employment period below the mean. This suggests that the employment period may not have played a significant role in determining the outcome of H1B visa applications. Other factors, such as the applicant's qualifications and the employer's sponsorship, may have had a greater impact on the decision.
# ## ----------------------------------------------------------------------------------------
# ## Are there any trends or patterns in the geographic distribution of H1B visa workers?
# Group by state and get the count
df_state = (
    final_df.groupby("LCA_CASE_EMPLOYER_STATE")["LCA_CASE_NUMBER"].count().reset_index()
)
# Create the choropleth map
fig = px.choropleth(
    df_state,
    locations="LCA_CASE_EMPLOYER_STATE",
    locationmode="USA-states",
    color="LCA_CASE_NUMBER",
    scope="usa",
    color_continuous_scale="Blues",
    title="H1B Visa Applications by State",
)
fig.show()
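# ##### (Optional sketch, not in the original script.) Besides raw counts, the share of certified cases per state can be mapped the same way; only columns already present in final_df are used here.
# Compute the certification rate per state (illustrative)
df_rate = (
    final_df.assign(is_certified=(final_df["STATUS"] == "CERTIFIED").astype(int))
    .groupby("LCA_CASE_EMPLOYER_STATE")["is_certified"]
    .mean()
    .reset_index()
)
fig = px.choropleth(
    df_rate,
    locations="LCA_CASE_EMPLOYER_STATE",
    locationmode="USA-states",
    color="is_certified",
    scope="usa",
    color_continuous_scale="Greens",
    title="Share of Certified H1B Applications by State (illustrative)",
)
fig.show()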
false
0
11,423
0
11,423
11,423
129627018
import nltk
from nltk.collocations import (
    TrigramCollocationFinder,
    BigramCollocationFinder,
    QuadgramCollocationFinder,
)
from nltk.metrics import (
    TrigramAssocMeasures,
    BigramAssocMeasures,
    QuadgramAssocMeasures,
)

# NOTE: word_tokenize, stopwords and WordNetLemmatizer below require the NLTK
# 'punkt', 'stopwords' and 'wordnet' resources to have been downloaded.
# Flatten the tokenized documents in txt into a single token list
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
    for j in i:
        tokens.append(j)
# Create bigram, trigram and quadgram collocation finders
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Drop n-grams occurring fewer than 1 time (raise this threshold to filter rare n-grams)
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric for each n-gram size
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top 10 n-grams of each size based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 10)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in t:
    if i[0] in top_trigrams:
        trigrams[" ".join(i[0])] = i[1]
trigrams = {
    k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
    if i[0] in top_bigrams:
        bigrams[" ".join(i[0])] = i[1]
bigrams = {
    k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
    if i[0] in top_quadgrams:
        quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
    k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
trigrams
bigrams
quadgrams
for n in top_trigrams:
    print(n)
# Print the raw n-gram frequency distributions
print("Top Trigrams:")
for trigram in finder_t.ngram_fd.items():
    print(trigram)
print("-------------------------")
print("Top Bigrams:")
for bigram in finder_b.ngram_fd.items():
    print(bigram)
print("-------------------------")
print("Top Quadgrams:")
for quadgram in finder_q.ngram_fd.items():
    print(quadgram)
import re


# Preprocessing
def remove_string_special_characters(s):
    # remove special characters
    stripped = re.sub(r"[^0-9a-zA-Z\s]", "", s)
    stripped = re.sub("_", "", stripped)
    # Collapse any run of whitespace to one space
    stripped = re.sub(r"\s+", " ", stripped)
    # Remove start and end white spaces
    stripped = stripped.strip()
    if stripped != "":
        return stripped.lower()
    return ""


from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize

lemmatizer = WordNetLemmatizer()
# Stopword removal
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
    line = remove_string_special_characters(line)
    # txt[i] = [x for x in line if ( x not in stop_words )]
    txt[i] = [x for x in word_tokenize(line) if (x not in stop_words)]
import pandas as pd

data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_3.json")
txt = list(data_may1["Text"])
lemmatizer.lemmatize("Rs")
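# NOTE (sketch, not part of the original notebook): the cells above are shown in reverse
# execution order -- `txt` is only created by the last cell. A clean top-to-bottom pass
# would look roughly like this, reusing remove_string_special_characters defined above:
import pandas as pd
from nltk import word_tokenize
from nltk.corpus import stopwords

# 1) load the raw text column (same path as above)
data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_3.json")
txt = list(data_may1["Text"])
# 2) clean, tokenize and drop stopwords per document
stop_words = set(stopwords.words("english"))
txt = [
    [w for w in word_tokenize(remove_string_special_characters(line)) if w not in stop_words]
    for line in txt
]
# 3) flatten into one token list and build the collocation finders as in the first cell
tokens = [w for doc in txt for w in doc]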
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/627/129627018.ipynb
null
null
[{"Id": 129627018, "ScriptId": 38536156, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14218315, "CreationDate": "05/15/2023 10:29:07", "VersionNumber": 2.0, "Title": "ngram_workspace", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 66.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import nltk
from nltk.collocations import (
    TrigramCollocationFinder,
    BigramCollocationFinder,
    QuadgramCollocationFinder,
)
from nltk.metrics import (
    TrigramAssocMeasures,
    BigramAssocMeasures,
    QuadgramAssocMeasures,
)

# NOTE: word_tokenize, stopwords and WordNetLemmatizer below require the NLTK
# 'punkt', 'stopwords' and 'wordnet' resources to have been downloaded.
# Flatten the tokenized documents in txt into a single token list
# tokens = nltk.word_tokenize(txt[0])
tokens = []
for i in txt:
    for j in i:
        tokens.append(j)
# Create bigram, trigram and quadgram collocation finders
finder_b = BigramCollocationFinder.from_words(tokens)
finder_t = TrigramCollocationFinder.from_words(tokens)
finder_q = QuadgramCollocationFinder.from_words(tokens)
# Drop n-grams occurring fewer than 1 time (raise this threshold to filter rare n-grams)
finder_b.apply_freq_filter(1)
finder_t.apply_freq_filter(1)
finder_q.apply_freq_filter(1)
# Set the scoring metric for each n-gram size
scoring_measure_b = BigramAssocMeasures.raw_freq
scoring_measure_t = TrigramAssocMeasures.raw_freq
scoring_measure_q = QuadgramAssocMeasures.raw_freq
# Get the top 10 n-grams of each size based on the scoring metric
top_trigrams = finder_t.nbest(scoring_measure_t, 10)
top_bigrams = finder_b.nbest(scoring_measure_b, 10)
top_quadgrams = finder_q.nbest(scoring_measure_q, 10)
trigrams = {}
bigrams = {}
quadgrams = {}
t = finder_t.ngram_fd.items()
b = finder_b.ngram_fd.items()
q = finder_q.ngram_fd.items()
for i in t:
    if i[0] in top_trigrams:
        trigrams[" ".join(i[0])] = i[1]
trigrams = {
    k: v for k, v in sorted(trigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in b:
    if i[0] in top_bigrams:
        bigrams[" ".join(i[0])] = i[1]
bigrams = {
    k: v for k, v in sorted(bigrams.items(), key=lambda item: item[1], reverse=True)
}
for i in q:
    if i[0] in top_quadgrams:
        quadgrams[" ".join(i[0])] = i[1]
quadgrams = {
    k: v for k, v in sorted(quadgrams.items(), key=lambda item: item[1], reverse=True)
}
trigrams
bigrams
quadgrams
for n in top_trigrams:
    print(n)
# Print the raw n-gram frequency distributions
print("Top Trigrams:")
for trigram in finder_t.ngram_fd.items():
    print(trigram)
print("-------------------------")
print("Top Bigrams:")
for bigram in finder_b.ngram_fd.items():
    print(bigram)
print("-------------------------")
print("Top Quadgrams:")
for quadgram in finder_q.ngram_fd.items():
    print(quadgram)
import re


# Preprocessing
def remove_string_special_characters(s):
    # remove special characters
    stripped = re.sub(r"[^0-9a-zA-Z\s]", "", s)
    stripped = re.sub("_", "", stripped)
    # Collapse any run of whitespace to one space
    stripped = re.sub(r"\s+", " ", stripped)
    # Remove start and end white spaces
    stripped = stripped.strip()
    if stripped != "":
        return stripped.lower()
    return ""


from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize

lemmatizer = WordNetLemmatizer()
# Stopword removal
stop_words = set(stopwords.words("english"))
for i, line in enumerate(txt):
    line = remove_string_special_characters(line)
    # txt[i] = [x for x in line if ( x not in stop_words )]
    txt[i] = [x for x in word_tokenize(line) if (x not in stop_words)]
import pandas as pd

data_may1 = pd.read_json("/kaggle/input/nctc-may-1-7/NCTC_may_3.json")
txt = list(data_may1["Text"])
lemmatizer.lemmatize("Rs")
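# Optional sketch (not in the original notebook): raw frequency tends to surface very common
# word pairs. NLTK's association measures also provide PMI, which favours n-grams that
# co-occur more often than chance; it can be swapped in on the same finders built above,
# usually together with a higher frequency filter so rare pairs do not dominate.
finder_b.apply_freq_filter(3)
finder_t.apply_freq_filter(3)
top_bigrams_pmi = finder_b.nbest(BigramAssocMeasures.pmi, 10)
top_trigrams_pmi = finder_t.nbest(TrigramAssocMeasures.pmi, 10)
print("Top bigrams by PMI:", top_bigrams_pmi)
print("Top trigrams by PMI:", top_trigrams_pmi)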
false
0
1,065
0
1,065
1,065
129720104
<jupyter_start><jupyter_text>Predicting Critical Heat Flux ### Context This dataset was prepared for the journal article entitled "On the prediction of critical heat flux using a physics-informed machine learning-aided framework" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. Kaggle dataset identifier: predicting-heat-flux <jupyter_script># In this notebook we will try to use AutoML to create a model. # If you like it, please, upvote! # The EDA with 'classical' paameters tunning is published here: https://www.kaggle.com/kdmitrie/pgs315-simple-imputer-optuna-blending # **Version info:** # 5. Initial submit without Deep Learning # # 0. Import basic libraries and data sources, including the original dataset import pandas as pd import numpy as np import h2o h2o.init() from h2o.automl import H2OAutoML from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split TRAIN_CSV = "/kaggle/input/playground-series-s3e15/data.csv" TEST_CSV = "/kaggle/input/playground-series-s3e15/sample_submission.csv" EXTERNAL_CSV = "/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv" # # 1. Prepare the data # ## 1.1. Read the data # First we read the CSV files and put all the data into a single dataframe. Though we don't have access to the missed features, with this combination of the datasets we can possibly better use the relations between features. # `test` column indicates the datasets and `gen` column indicates, whether the data was generated by deep learning model or not. target = "x_e_out [-]" df1 = pd.read_csv(TRAIN_CSV) df1["test"] = pd.isna(df1[target]) df1["gen"] = 1 df2 = pd.read_csv(EXTERNAL_CSV) df2["test"] = 0 df2["gen"] = 0 # df = df1 # Do not include the original dataset df = pd.concat([df1, df2]) df.reset_index(inplace=True) df.drop("index", axis=1, inplace=True) df.head() # We replace categorical variables with their mean values. columns = [ "author", "geometry", "pressure [MPa]", "mass_flux [kg/m2-s]", "D_e [mm]", "D_h [mm]", "length [mm]", "chf_exp [MW/m2]", ] cat_columns = ["author", "geometry"] for col in cat_columns: replace = df.groupby(by=col)[target].mean().to_dict() df[col].replace(replace, inplace=True) # # 2. Using H2O def get_numpy_arrays(data): X = data.drop(["id", "test", "gen", target], axis=1).to_numpy() y = data[target].to_numpy() scaler = StandardScaler() X = scaler.fit_transform(X) return X, y X, y = get_numpy_arrays(df[df.test != 1]) X_submit, y_submit = get_numpy_arrays(df[df.test == 1]) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) df_train = pd.DataFrame(data=X_train, columns=columns) df_train[target] = y_train df_test = pd.DataFrame(data=X_test, columns=columns) df_test[target] = y_test data_h2o_train_val = h2o.H2OFrame(df_train) test_frame = h2o.H2OFrame(df_test) data_h2o_train_val[target] = data_h2o_train_val[target] test_frame[target] = test_frame[target] training_frame, validation_frame = data_h2o_train_val.split_frame( ratios=[ 0.7, ], seed=42, ) aml = H2OAutoML( max_models=100, seed=12, exclude_algos=["DeepLearning"], verbosity=None, nfolds=10, sort_metric="RMSE", stopping_metric="RMSE", ) # aml = H2OAutoML(max_models=10, seed=12, exclude_algos=[], verbosity=None, nfolds=5) aml.train( x=columns, y=target, training_frame=training_frame, validation_frame=validation_frame, ) # # 3. 
Predict & submit df_submit = pd.DataFrame(data=X_submit, columns=columns) data_h2o_submit = h2o.H2OFrame(df_submit) prediction = aml.leader.predict(data_h2o_submit).as_data_frame()["predict"].to_numpy() data_submit = df[df.test == 1].copy() data_submit[target] = prediction data_submit[["id", target]].to_csv("submission_h2o.csv", index=False)
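# # 4. Inspect the AutoML run (optional)
# An optional follow-up sketch, not part of the original submission flow: H2O keeps a
# leaderboard of every model it trained, and the leading model can be scored on the held-out
# test_frame and saved to disk. The output directory below is just an example path.
lb = aml.leaderboard
print(lb.head(rows=10))
perf = aml.leader.model_performance(test_data=test_frame)
print("Hold-out RMSE:", perf.rmse())
model_path = h2o.save_model(model=aml.leader, path="./h2o_models", force=True)
print("Saved leader model to:", model_path)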
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720104.ipynb
predicting-heat-flux
saurabhshahane
[{"Id": 129720104, "ScriptId": 38088939, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4308868, "CreationDate": "05/16/2023 02:39:52", "VersionNumber": 5.0, "Title": "PGS315: Easy use of H2O AutoML", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 109.0, "LinesInsertedFromPrevious": 31.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 78.0, "LinesInsertedFromFork": 54.0, "LinesDeletedFromFork": 526.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 55.0, "TotalVotes": 1}]
[{"Id": 186058942, "KernelVersionId": 129720104, "SourceDatasetVersionId": 1921393}]
[{"Id": 1921393, "DatasetId": 1145869, "DatasourceVersionId": 1959907, "CreatorUserId": 2411256, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "02/08/2021 11:44:07", "VersionNumber": 1.0, "Title": "Predicting Critical Heat Flux", "Slug": "predicting-heat-flux", "Subtitle": "prediction of critical heat flux using Machine Learning", "Description": "### Context\n\nThis dataset was prepared for the journal article entitled \"On the prediction of critical heat flux using a physics-informed machine learning-aided framework\" (doi: 10.1016/j.applthermaleng.2019.114540). The dataset contains processed and compiled records of experimental critical heat flux and boundary conditions used for the work presented in the article. \n\n### Acknowledgements\n\nZhao, Xingang (2020), \u201cData for: On the prediction of critical heat flux using a physics-informed machine learning-aided framework\u201d, Mendeley Data, V1, doi: 10.17632/5p5h37tyv7.1", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1145869, "CreatorUserId": 2411256, "OwnerUserId": 2411256.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1921393.0, "CurrentDatasourceVersionId": 1959907.0, "ForumId": 1163376, "Type": 2, "CreationDate": "02/08/2021 11:44:07", "LastActivityDate": "02/08/2021", "TotalViews": 6889, "TotalDownloads": 589, "TotalVotes": 42, "TotalKernels": 78}]
[{"Id": 2411256, "UserName": "saurabhshahane", "DisplayName": "Saurabh Shahane", "RegisterDate": "10/26/2018", "PerformanceTier": 4}]
# In this notebook we will try to use AutoML to create a model. # If you like it, please, upvote! # The EDA with 'classical' paameters tunning is published here: https://www.kaggle.com/kdmitrie/pgs315-simple-imputer-optuna-blending # **Version info:** # 5. Initial submit without Deep Learning # # 0. Import basic libraries and data sources, including the original dataset import pandas as pd import numpy as np import h2o h2o.init() from h2o.automl import H2OAutoML from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split TRAIN_CSV = "/kaggle/input/playground-series-s3e15/data.csv" TEST_CSV = "/kaggle/input/playground-series-s3e15/sample_submission.csv" EXTERNAL_CSV = "/kaggle/input/predicting-heat-flux/Data_CHF_Zhao_2020_ATE.csv" # # 1. Prepare the data # ## 1.1. Read the data # First we read the CSV files and put all the data into a single dataframe. Though we don't have access to the missed features, with this combination of the datasets we can possibly better use the relations between features. # `test` column indicates the datasets and `gen` column indicates, whether the data was generated by deep learning model or not. target = "x_e_out [-]" df1 = pd.read_csv(TRAIN_CSV) df1["test"] = pd.isna(df1[target]) df1["gen"] = 1 df2 = pd.read_csv(EXTERNAL_CSV) df2["test"] = 0 df2["gen"] = 0 # df = df1 # Do not include the original dataset df = pd.concat([df1, df2]) df.reset_index(inplace=True) df.drop("index", axis=1, inplace=True) df.head() # We replace categorical variables with their mean values. columns = [ "author", "geometry", "pressure [MPa]", "mass_flux [kg/m2-s]", "D_e [mm]", "D_h [mm]", "length [mm]", "chf_exp [MW/m2]", ] cat_columns = ["author", "geometry"] for col in cat_columns: replace = df.groupby(by=col)[target].mean().to_dict() df[col].replace(replace, inplace=True) # # 2. Using H2O def get_numpy_arrays(data): X = data.drop(["id", "test", "gen", target], axis=1).to_numpy() y = data[target].to_numpy() scaler = StandardScaler() X = scaler.fit_transform(X) return X, y X, y = get_numpy_arrays(df[df.test != 1]) X_submit, y_submit = get_numpy_arrays(df[df.test == 1]) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) df_train = pd.DataFrame(data=X_train, columns=columns) df_train[target] = y_train df_test = pd.DataFrame(data=X_test, columns=columns) df_test[target] = y_test data_h2o_train_val = h2o.H2OFrame(df_train) test_frame = h2o.H2OFrame(df_test) data_h2o_train_val[target] = data_h2o_train_val[target] test_frame[target] = test_frame[target] training_frame, validation_frame = data_h2o_train_val.split_frame( ratios=[ 0.7, ], seed=42, ) aml = H2OAutoML( max_models=100, seed=12, exclude_algos=["DeepLearning"], verbosity=None, nfolds=10, sort_metric="RMSE", stopping_metric="RMSE", ) # aml = H2OAutoML(max_models=10, seed=12, exclude_algos=[], verbosity=None, nfolds=5) aml.train( x=columns, y=target, training_frame=training_frame, validation_frame=validation_frame, ) # # 3. Predict & submit df_submit = pd.DataFrame(data=X_submit, columns=columns) data_h2o_submit = h2o.H2OFrame(df_submit) prediction = aml.leader.predict(data_h2o_submit).as_data_frame()["predict"].to_numpy() data_submit = df[df.test == 1].copy() data_submit[target] = prediction data_submit[["id", target]].to_csv("submission_h2o.csv", index=False)
false
0
1,250
1
1,366
1,250
129720583
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import seaborn as sns full_data = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv") sample = pd.read_csv("/kaggle/input/playground-series-s3e15/sample_submission.csv") len(sample), len(full_data) # Check if IDs are same u1 = full_data[full_data["x_e_out [-]"].isna()]["id"].unique() u2 = sample["id"].unique() set(u1).union(set(u2)) == set(u1).intersection(set(u2)) test = full_data[full_data["x_e_out [-]"].isna()] data = full_data[~full_data["x_e_out [-]"].isna()] data.info() sample sample["x_e_out [-]"] = data.mean()["x_e_out [-]"] sample.to_csv("submission.csv", index=False)
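# Hedged sketch (an addition, not from the original notebook): the same submission filled with a per-group mean instead of the global mean. The "geometry" column name is an assumption taken from the companion AutoML notebook above and should be checked against data.csv.
group_means = data.groupby("geometry")["x_e_out [-]"].mean()
per_row = test.set_index("id")["geometry"].map(group_means)
sample["x_e_out [-]"] = sample["id"].map(per_row).fillna(data["x_e_out [-]"].mean())
sample.to_csv("submission_group_mean.csv", index=False)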
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720583.ipynb
null
null
[{"Id": 129720583, "ScriptId": 38577091, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12420055, "CreationDate": "05/16/2023 02:46:48", "VersionNumber": 1.0, "Title": "notebook98729bbae7", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 43.0, "LinesInsertedFromPrevious": 43.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import seaborn as sns full_data = pd.read_csv("/kaggle/input/playground-series-s3e15/data.csv") sample = pd.read_csv("/kaggle/input/playground-series-s3e15/sample_submission.csv") len(sample), len(full_data) # Check if IDs are same u1 = full_data[full_data["x_e_out [-]"].isna()]["id"].unique() u2 = sample["id"].unique() set(u1).union(set(u2)) == set(u1).intersection(set(u2)) test = full_data[full_data["x_e_out [-]"].isna()] data = full_data[~full_data["x_e_out [-]"].isna()] data.info() sample sample["x_e_out [-]"] = data.mean()["x_e_out [-]"] sample.to_csv("submission.csv", index=False)
false
0
419
0
419
419
129720149
<jupyter_start><jupyter_text>Rice Dataset Commeo and Osmancik DATASET: https://www.muratkoklu.com/datasets/ 1: KOKLU, M., CINAR, I. and TASPINAR, Y. S. (2021). Classification of rice varieties with deep learning methods. Computers and Electronics in Agriculture, 187, 106285. DOI: https://doi.org/10.1016/j.compag.2021.106285 2: CINAR, I. and KOKLU, M. (2021). Determination of Effective and Specific Physical Features of Rice Varieties by Computer Vision In Exterior Quality Inspection. Selcuk Journal of Agriculture and Food Sciences, 35(3), 229-243. DOI: https://doi.org/10.15316/SJAFS.2021.252 3: CINAR, I. and KOKLU, M. (2022). Identification of Rice Varieties Using Machine Learning Algorithms. Journal of Agricultural Sciences, 28 (2), 307-325. DOI: https://doi.org/10.15832/ankutbd.862482 4: CINAR, I. and KOKLU, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, 7(3), 188-194. DOI: https://doi.org/10.18201/ijisae.2019355381 Relevant Papers / Citation Requests / Acknowledgements: Cinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381. Data Set Name: Rice Dataset (Commeo and Osmancik) Abstract: A total of 3810 rice grain's images were taken for the two species (Cammeo and Osmancik), processed and feature inferences were made. 7 morphological features were obtained for each grain of rice. Source: Ilkay CINAR Graduate School of Natural and Applied Sciences, Selcuk University, Konya, TURKEY [email protected] Murat KOKLU Faculty of Technology, Selcuk University, Konya, TURKEY. [email protected] DATASET: https://www.muratkoklu.com/datasets/ Relevant Information: In order to classify the rice varieties (Cammeo and Osmancik) used, preliminary processing was applied to the pictures obtained with computer vision system and a total of 3810 rice grains were obtained. Furthermore, 7 morphological features have been inferred for each grain. A data set has been created for the properties obtained. Attribute Information: 1. Area: Returns the number of pixels within the boundaries of the rice grain. 2. Perimeter: Calculates the circumference by calculating the distance between pixels around the boundaries of the rice grain. 3. Major Axis Length: The longest line that can be drawn on the rice grain, i.e. the main axis distance, gives. 4. Minor Axis Length: The shortest line that can be drawn on the rice grain, i.e. the small axis distance, gives. 5. Eccentricity: It measures how round the ellipse, which has the same moments as the rice grain, is. 6. Convex Area: Returns the pixel count of the smallest convex shell of the region formed by the rice grain. 7. Extent: Returns the ratio of the region formed by the rice grain to the bounding box pixels 8. Class: Commeo and Osmancik. Relevant Papers / Citation Requests / Acknowledgements: Cinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381. 
Kaggle dataset identifier: rice-dataset-commeo-and-osmancik <jupyter_script>import pandas as pd import numpy as np import seaborn as sb import matplotlib.pyplot as plt from sklearn.cluster import KMeans data = pd.read_excel( "/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx" ) data # **Feature selection** X = data.loc[:, ["Major_Axis_Length", "Perimeter"]].values # **Choosing the optimal number of clusters:** wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters=i, init="k-means++", random_state=100500) kmeans.fit(X) wcss.append(kmeans.inertia_) plt.plot(range(1, 11), wcss) plt.title("Elbow method") plt.xlabel("Number of clusters") plt.ylabel("WCSS") plt.show() # The optimal number of clusters is 5 # Fitting the model with the optimal number of clusters kmeans = KMeans(n_clusters=5, init="k-means++", random_state=100500) y_kmeans = kmeans.fit_predict(X) plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1") plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2") plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3") plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], c="purple", label="Cluster 4") plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], c="orange", label="Cluster 5") plt.scatter( kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c="yellow", label="Centroids", ) plt.title("Clusters") plt.xlabel("MajorAxisLength") plt.ylabel("Perimeter") plt.legend() plt.show() # Visualization with a different number of clusters kmeans = KMeans(n_clusters=3, init="k-means++", random_state=100500) y_kmeans = kmeans.fit_predict(X) plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1") plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2") plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3") plt.scatter( kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c="yellow", label="Centroids", ) plt.title("Clusters") plt.xlabel("MajorAxisLength") plt.ylabel("Perimeter") plt.legend() plt.show()
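# Hedged sketch (not in the original notebook): cross-check the elbow choice with
# silhouette scores on the same two-column feature matrix X used above.
from sklearn.metrics import silhouette_score

for k in range(2, 7):
    labels = KMeans(n_clusters=k, init="k-means++", random_state=100500).fit_predict(X)
    print(k, round(silhouette_score(X, labels), 3))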
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720149.ipynb
rice-dataset-commeo-and-osmancik
muratkokludataset
[{"Id": 129720149, "ScriptId": 37975997, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14569588, "CreationDate": "05/16/2023 02:40:34", "VersionNumber": 3.0, "Title": "metriclab", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 13.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186059002, "KernelVersionId": 129720149, "SourceDatasetVersionId": 3398955}]
[{"Id": 3398955, "DatasetId": 2048923, "DatasourceVersionId": 3450628, "CreatorUserId": 10072866, "LicenseName": "CC0: Public Domain", "CreationDate": "04/03/2022 00:40:03", "VersionNumber": 1.0, "Title": "Rice Dataset Commeo and Osmancik", "Slug": "rice-dataset-commeo-and-osmancik", "Subtitle": "Rice Dataset: 2 Class Commeo and Osmancik Rice", "Description": "DATASET: https://www.muratkoklu.com/datasets/\n\n1: KOKLU, M., CINAR, I. and TASPINAR, Y. S. (2021). Classification of rice varieties with deep learning methods. Computers and Electronics in Agriculture, 187, 106285.\nDOI: https://doi.org/10.1016/j.compag.2021.106285\n\n2: CINAR, I. and KOKLU, M. (2021). Determination of Effective and Specific Physical Features of Rice Varieties by Computer Vision In Exterior Quality Inspection. Selcuk Journal of Agriculture and Food Sciences, 35(3), 229-243.\nDOI: https://doi.org/10.15316/SJAFS.2021.252\n\n3: CINAR, I. and KOKLU, M. (2022). Identification of Rice Varieties Using Machine Learning Algorithms. Journal of Agricultural Sciences, 28 (2), 307-325.\nDOI: https://doi.org/10.15832/ankutbd.862482\n\n4: CINAR, I. and KOKLU, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, 7(3), 188-194.\nDOI: https://doi.org/10.18201/ijisae.2019355381\n\nRelevant Papers / Citation Requests / Acknowledgements:\nCinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.\n\nData Set Name: Rice Dataset (Commeo and Osmancik)\nAbstract: A total of 3810 rice grain's images were taken for the two species (Cammeo and Osmancik), processed and feature inferences were made. 7 morphological features were obtained for each grain of rice.\t\n\nSource:\nIlkay CINAR\nGraduate School of Natural and Applied Sciences, \nSelcuk University, Konya, TURKEY\[email protected]\n\nMurat KOKLU\nFaculty of Technology, \nSelcuk University, Konya, TURKEY.\[email protected]\n\nDATASET: https://www.muratkoklu.com/datasets/\n\nRelevant Information: In order to classify the rice varieties (Cammeo and Osmancik) used, preliminary processing was applied to the pictures obtained with computer vision system and a total of 3810 rice grains were obtained. Furthermore, 7 morphological features have been inferred for each grain. A data set has been created for the properties obtained.\n\nAttribute Information:\n1. Area: Returns the number of pixels within the boundaries of the rice grain.\n2. Perimeter: Calculates the circumference by calculating the distance between pixels around the boundaries of the rice grain.\n3. Major Axis Length: The longest line that can be drawn on the rice grain, i.e. the main axis distance, gives.\n4. Minor Axis Length: The shortest line that can be drawn on the rice grain, i.e. the small axis distance, gives.\n5. Eccentricity: It measures how round the ellipse, which has the same moments as the rice grain, is.\n6. Convex Area: Returns the pixel count of the smallest convex shell of the region formed by the rice grain.\n7. Extent: Returns the ratio of the region formed by the rice grain to the bounding box pixels\n8. Class: Commeo and Osmancik.\n\nRelevant Papers / Citation Requests / Acknowledgements:\nCinar, I. and Koklu, M. (2019). Classification of Rice Varieties Using Artificial Intelligence Methods. 
International Journal of Intelligent Systems and Applications in Engineering, vol.7, no.3 (Sep. 2019), pp.188-194. https://doi.org/10.18201/ijisae.2019355381.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2048923, "CreatorUserId": 10072866, "OwnerUserId": 10072866.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3398955.0, "CurrentDatasourceVersionId": 3450628.0, "ForumId": 2073935, "Type": 2, "CreationDate": "04/03/2022 00:40:03", "LastActivityDate": "04/03/2022", "TotalViews": 6863, "TotalDownloads": 810, "TotalVotes": 1417, "TotalKernels": 2}]
[{"Id": 10072866, "UserName": "muratkokludataset", "DisplayName": "Murat KOKLU", "RegisterDate": "03/28/2022", "PerformanceTier": 2}]
import pandas as pd import numpy as np import seaborn as sb import matplotlib.pyplot as plt from sklearn.cluster import KMeans data = pd.read_excel( "/kaggle/input/rice-dataset-commeo-and-osmancik/Rice_Dataset_Commeo_and_Osmancik/Rice_Cammeo_Osmancik.xlsx" ) data # **Feature selection** X = data.loc[:, ["Major_Axis_Length", "Perimeter"]].values # **Choosing the optimal number of clusters:** wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters=i, init="k-means++", random_state=100500) kmeans.fit(X) wcss.append(kmeans.inertia_) plt.plot(range(1, 11), wcss) plt.title("Elbow method") plt.xlabel("Number of clusters") plt.ylabel("WCSS") plt.show() # The optimal number of clusters is 5 # Fitting the model with the optimal number of clusters kmeans = KMeans(n_clusters=5, init="k-means++", random_state=100500) y_kmeans = kmeans.fit_predict(X) plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1") plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2") plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3") plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], c="purple", label="Cluster 4") plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], c="orange", label="Cluster 5") plt.scatter( kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c="yellow", label="Centroids", ) plt.title("Clusters") plt.xlabel("MajorAxisLength") plt.ylabel("Perimeter") plt.legend() plt.show() # Visualization with a different number of clusters kmeans = KMeans(n_clusters=3, init="k-means++", random_state=100500) y_kmeans = kmeans.fit_predict(X) plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], c="red", label="Cluster 1") plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], c="green", label="Cluster 2") plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], c="cyan", label="Cluster 3") plt.scatter( kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c="yellow", label="Centroids", ) plt.title("Clusters") plt.xlabel("MajorAxisLength") plt.ylabel("Perimeter") plt.legend() plt.show()
false
0
964
0
2,114
964
129720447
<jupyter_start><jupyter_text>Diamonds ### Context This classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization. ### Content **price** price in US dollars (\$326--\$18,823) **carat** weight of the diamond (0.2--5.01) **cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal) **color** diamond colour, from J (worst) to D (best) **clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best)) **x** length in mm (0--10.74) **y** width in mm (0--58.9) **z** depth in mm (0--31.8) **depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79) **table** width of top of diamond relative to widest point (43--95) Kaggle dataset identifier: diamonds <jupyter_code>import pandas as pd df = pd.read_csv('diamonds/diamonds.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 53940 entries, 0 to 53939 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 53940 non-null int64 1 carat 53940 non-null float64 2 cut 53940 non-null object 3 color 53940 non-null object 4 clarity 53940 non-null object 5 depth 53940 non-null float64 6 table 53940 non-null float64 7 price 53940 non-null int64 8 x 53940 non-null float64 9 y 53940 non-null float64 10 z 53940 non-null float64 dtypes: float64(6), int64(2), object(3) memory usage: 4.5+ MB <jupyter_text>Examples: { "Unnamed: 0": 1, "carat": 0.23, "cut": "Ideal", "color": "E", "clarity": "SI2", "depth": 61.5, "table": 55, "price": 326, "x": 3.95, "y": 3.98, "z": 2.43 } { "Unnamed: 0": 2, "carat": 0.21, "cut": "Premium", "color": "E", "clarity": "SI1", "depth": 59.8, "table": 61, "price": 326, "x": 3.89, "y": 3.84, "z": 2.31 } { "Unnamed: 0": 3, "carat": 0.23, "cut": "Good", "color": "E", "clarity": "VS1", "depth": 56.9, "table": 65, "price": 327, "x": 4.05, "y": 4.07, "z": 2.31 } { "Unnamed: 0": 4, "carat": 0.29, "cut": "Premium", "color": "I", "clarity": "VS2", "depth": 62.4, "table": 58, "price": 334, "x": 4.2, "y": 4.23, "z": 2.63 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Load Diamonds Dataset df = pd.read_csv("/kaggle/input/diamonds/diamonds.csv") df.head(20) # Analyze dataset df.describe() # Example 1: Get the summary statistics of a numerical column summary_stats = df["price"].describe() summary_stats # Example 2: Count the occurrences of unique values in a categorical column value_counts = df["cut"].value_counts() value_counts # ## Insights from the "diamonds" dataset # In the diamonds dataset the most expensive stone costs 18823 USD, the cheapest costs 326 USD, and the mean price is about 3932.80 USD. # Carat weight ranges from 0.20 (lightest) to 5.01 (heaviest), with a mean of about 0.80. # There are five cut categories (Fair, Good, Very Good, Premium and Ideal). # - The distribution of diamond cuts is as follows: # - Fair: Count 1610 # - Good: Count 4906 # - Very Good: Count 12082 # - Premium : Count 13791 # - Ideal : Count 21551 # import seaborn as sns import matplotlib.pyplot as plt # Example: Bar plot of cut counts plt.figure(figsize=(8, 6)) value_counts.plot(kind="bar") plt.title("Diamond Cut Counts") plt.xlabel("Cut") plt.ylabel("Count") plt.show() sns.boxplot(x="cut", y="price", data=df) plt.title("Diamond Prices by Cut") plt.xlabel("Cut") plt.ylabel("Price") plt.show() sns.scatterplot(x="carat", y="price", hue="clarity", size="depth", data=df) plt.title("Carat vs. Price (Colored by Clarity, Sized by Depth)") plt.xlabel("Carat") plt.ylabel("Price") plt.show() # Load Diabetes Dataset df = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv") df # Analyze dataset df.describe() # Get the summary statistics of the numerical columns summary_stats = df.describe() summary_stats # Count the number of instances with and without diabetes diabetes_counts = df["Outcome"].value_counts() diabetes_counts # The dataset contains information about various factors such as glucose levels, blood pressure, BMI, and age of Pima Indian women, along with an outcome variable indicating whether or not they have diabetes. # - The summary statistics of the numerical columns are as follows: # - Glucose: Mean = 120.894531, Standard Deviation = 31.972618, Minimum = 0.000000, Maximum = 199.000000 # - BloodPressure: Mean = 69.105469, Standard Deviation = 19.355807, Minimum = 0.000000, Maximum = 122.000000 # - BMI: Mean = 31.992578, Standard Deviation = 7.884160, Minimum = 0.000000, Maximum = 67.100000 # - Age: Mean = 33.240885, Standard Deviation = 11.760232, Minimum = 21.000000, Maximum = 81.000000. # - The counts of diabetes outcome are as follows: # - No Diabetes: 500 instances # - Diabetes: 268 instances # - From the count plot, we can see that the dataset is slightly imbalanced, with a higher number of instances without diabetes compared to those with diabetes.
# - In the scatter plot of glucose levels versus BMI, we observe that there seems to be a higher concentration of diabetes cases in the higher glucose and higher BMI region, while the non-diabetes cases are spread across the range of glucose and BMI values. sns.lineplot(x="Age", y="Glucose", hue="Outcome", data=df) plt.title("Age vs. Glucose Levels (Colored by Diabetes Outcome)") plt.xlabel("Age") plt.ylabel("Glucose") plt.show() sns.scatterplot(x="Glucose", y="BMI", hue="Outcome", data=df) plt.title("Glucose Levels vs. BMI (Colored by Diabetes Outcome)") plt.xlabel("Glucose") plt.ylabel("BMI") plt.show()
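# Hedged sketch (an addition, not from the original notebook): a correlation heatmap for
# the diabetes dataframe loaded above, to see which measurements track the Outcome column.
plt.figure(figsize=(8, 6))
sns.heatmap(df.corr(), annot=True, fmt=".2f", cmap="coolwarm")
plt.title("Diabetes Feature Correlations")
plt.show()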
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/720/129720447.ipynb
diamonds
shivam2503
[{"Id": 129720447, "ScriptId": 38575075, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8004461, "CreationDate": "05/16/2023 02:45:04", "VersionNumber": 1.0, "Title": "Data Visualization", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 120.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186059550, "KernelVersionId": 129720447, "SourceDatasetVersionId": 2368}, {"Id": 186059549, "KernelVersionId": 129720447, "SourceDatasetVersionId": 482}]
[{"Id": 2368, "DatasetId": 1312, "DatasourceVersionId": 2368, "CreatorUserId": 945829, "LicenseName": "Unknown", "CreationDate": "05/25/2017 03:06:57", "VersionNumber": 1.0, "Title": "Diamonds", "Slug": "diamonds", "Subtitle": "Analyze diamonds by their cut, color, clarity, price, and other attributes", "Description": "### Context \n\nThis classic dataset contains the prices and other attributes of almost 54,000 diamonds. It's a great dataset for beginners learning to work with data analysis and visualization.\n\n### Content\n\n**price** price in US dollars (\\$326--\\$18,823)\n\n**carat** weight of the diamond (0.2--5.01)\n\n**cut** quality of the cut (Fair, Good, Very Good, Premium, Ideal)\n\n**color** diamond colour, from J (worst) to D (best)\n\n**clarity** a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))\n\n**x** length in mm (0--10.74)\n\n**y** width in mm (0--58.9)\n\n**z** depth in mm (0--31.8)\n\n**depth** total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79)\n\n**table** width of top of diamond relative to widest point (43--95)", "VersionNotes": "Initial release", "TotalCompressedBytes": 3192560.0, "TotalUncompressedBytes": 3192560.0}]
[{"Id": 1312, "CreatorUserId": 945829, "OwnerUserId": 945829.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2368.0, "CurrentDatasourceVersionId": 2368.0, "ForumId": 3701, "Type": 2, "CreationDate": "05/25/2017 03:06:57", "LastActivityDate": "02/06/2018", "TotalViews": 434479, "TotalDownloads": 74575, "TotalVotes": 952, "TotalKernels": 444}]
[{"Id": 945829, "UserName": "shivam2503", "DisplayName": "Shivam Agrawal", "RegisterDate": "03/07/2017", "PerformanceTier": 1}]
import numpy as np  # linear algebra import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Load Diamonds Dataset df = pd.read_csv("/kaggle/input/diamonds/diamonds.csv") df.head(20) # Analyze dataset df.describe() # Example 1: Get the summary statistics of a numerical column summary_stats = df["price"].describe() summary_stats # Example 2: Count the occurrences of unique values in a categorical column value_counts = df["cut"].value_counts() value_counts # ## Insights from the "diamonds" dataset # In the diamonds dataset the most expensive stone costs 18823 USD, the cheapest costs 326 USD, and the mean price is about 3932.80 USD. # Carat weight ranges from 0.20 (lightest) to 5.01 (heaviest), with a mean of about 0.80. # There are five cut categories (Fair, Good, Very Good, Premium and Ideal). # - The distribution of diamond cuts is as follows: # - Fair: Count 1610 # - Good: Count 4906 # - Very Good: Count 12082 # - Premium : Count 13791 # - Ideal : Count 21551 # import seaborn as sns import matplotlib.pyplot as plt # Example: Bar plot of cut counts plt.figure(figsize=(8, 6)) value_counts.plot(kind="bar") plt.title("Diamond Cut Counts") plt.xlabel("Cut") plt.ylabel("Count") plt.show() sns.boxplot(x="cut", y="price", data=df) plt.title("Diamond Prices by Cut") plt.xlabel("Cut") plt.ylabel("Price") plt.show() sns.scatterplot(x="carat", y="price", hue="clarity", size="depth", data=df) plt.title("Carat vs. Price (Colored by Clarity, Sized by Depth)") plt.xlabel("Carat") plt.ylabel("Price") plt.show() # Load Diabetes Dataset df = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv") df # Analyze dataset df.describe() # Get the summary statistics of the numerical columns summary_stats = df.describe() summary_stats # Count the number of instances with and without diabetes diabetes_counts = df["Outcome"].value_counts() diabetes_counts # The dataset contains information about various factors such as glucose levels, blood pressure, BMI, and age of Pima Indian women, along with an outcome variable indicating whether or not they have diabetes. # - The summary statistics of the numerical columns are as follows: # - Glucose: Mean = 120.894531, Standard Deviation = 31.972618, Minimum = 0.000000, Maximum = 199.000000 # - BloodPressure: Mean = 69.105469, Standard Deviation = 19.355807, Minimum = 0.000000, Maximum = 122.000000 # - BMI: Mean = 31.992578, Standard Deviation = 7.884160, Minimum = 0.000000, Maximum = 67.100000 # - Age: Mean = 33.240885, Standard Deviation = 11.760232, Minimum = 21.000000, Maximum = 81.000000. # - The counts of diabetes outcome are as follows: # - No Diabetes: 500 instances # - Diabetes: 268 instances # - From the count plot, we can see that the dataset is slightly imbalanced, with a higher number of instances without diabetes compared to those with diabetes.
# - In the scatter plot of glucose levels versus BMI, we observe that there seems to be a higher concentration of diabetes cases in the higher glucose and higher BMI region, while the non-diabetes cases are spread across the range of glucose and BMI values. sns.lineplot(x="Age", y="Glucose", hue="Outcome", data=df) plt.title("Age vs. Glucose Levels (Colored by Diabetes Outcome)") plt.xlabel("Age") plt.ylabel("Glucose") plt.show() sns.scatterplot(x="Glucose", y="BMI", hue="Outcome", data=df) plt.title("Glucose Levels vs. BMI (Colored by Diabetes Outcome)") plt.xlabel("Glucose") plt.ylabel("BMI") plt.show()
[{"diamonds/diamonds.csv": {"column_names": "[\"Unnamed: 0\", \"carat\", \"cut\", \"color\", \"clarity\", \"depth\", \"table\", \"price\", \"x\", \"y\", \"z\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"carat\": \"float64\", \"cut\": \"object\", \"color\": \"object\", \"clarity\": \"object\", \"depth\": \"float64\", \"table\": \"float64\", \"price\": \"int64\", \"x\": \"float64\", \"y\": \"float64\", \"z\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 53940 entries, 0 to 53939\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 53940 non-null int64 \n 1 carat 53940 non-null float64\n 2 cut 53940 non-null object \n 3 color 53940 non-null object \n 4 clarity 53940 non-null object \n 5 depth 53940 non-null float64\n 6 table 53940 non-null float64\n 7 price 53940 non-null int64 \n 8 x 53940 non-null float64\n 9 y 53940 non-null float64\n 10 z 53940 non-null float64\ndtypes: float64(6), int64(2), object(3)\nmemory usage: 4.5+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 53940.0, \"mean\": 26970.5, \"std\": 15571.281096942537, \"min\": 1.0, \"25%\": 13485.75, \"50%\": 26970.5, \"75%\": 40455.25, \"max\": 53940.0}, \"carat\": {\"count\": 53940.0, \"mean\": 0.7979397478680014, \"std\": 0.4740112444054184, \"min\": 0.2, \"25%\": 0.4, \"50%\": 0.7, \"75%\": 1.04, \"max\": 5.01}, \"depth\": {\"count\": 53940.0, \"mean\": 61.749404894327036, \"std\": 1.432621318833661, \"min\": 43.0, \"25%\": 61.0, \"50%\": 61.8, \"75%\": 62.5, \"max\": 79.0}, \"table\": {\"count\": 53940.0, \"mean\": 57.45718390804598, \"std\": 2.2344905628213225, \"min\": 43.0, \"25%\": 56.0, \"50%\": 57.0, \"75%\": 59.0, \"max\": 95.0}, \"price\": {\"count\": 53940.0, \"mean\": 3932.799721913237, \"std\": 3989.439738146379, \"min\": 326.0, \"25%\": 950.0, \"50%\": 2401.0, \"75%\": 5324.25, \"max\": 18823.0}, \"x\": {\"count\": 53940.0, \"mean\": 5.731157211716722, \"std\": 1.1217607467924928, \"min\": 0.0, \"25%\": 4.71, \"50%\": 5.7, \"75%\": 6.54, \"max\": 10.74}, \"y\": {\"count\": 53940.0, \"mean\": 5.734525954764553, \"std\": 1.1421346741235552, \"min\": 0.0, \"25%\": 4.72, \"50%\": 5.71, \"75%\": 6.54, \"max\": 58.9}, \"z\": {\"count\": 53940.0, \"mean\": 3.5387337782721544, \"std\": 0.7056988469499941, \"min\": 0.0, \"25%\": 2.91, \"50%\": 3.53, \"75%\": 4.04, \"max\": 31.8}}", "examples": "{\"Unnamed: 0\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"carat\":{\"0\":0.23,\"1\":0.21,\"2\":0.23,\"3\":0.29},\"cut\":{\"0\":\"Ideal\",\"1\":\"Premium\",\"2\":\"Good\",\"3\":\"Premium\"},\"color\":{\"0\":\"E\",\"1\":\"E\",\"2\":\"E\",\"3\":\"I\"},\"clarity\":{\"0\":\"SI2\",\"1\":\"SI1\",\"2\":\"VS1\",\"3\":\"VS2\"},\"depth\":{\"0\":61.5,\"1\":59.8,\"2\":56.9,\"3\":62.4},\"table\":{\"0\":55.0,\"1\":61.0,\"2\":65.0,\"3\":58.0},\"price\":{\"0\":326,\"1\":326,\"2\":327,\"3\":334},\"x\":{\"0\":3.95,\"1\":3.89,\"2\":4.05,\"3\":4.2},\"y\":{\"0\":3.98,\"1\":3.84,\"2\":4.07,\"3\":4.23},\"z\":{\"0\":2.43,\"1\":2.31,\"2\":2.31,\"3\":2.63}}"}}]
true
2
<start_data_description><data_path>diamonds/diamonds.csv: <column_names> ['Unnamed: 0', 'carat', 'cut', 'color', 'clarity', 'depth', 'table', 'price', 'x', 'y', 'z'] <column_types> {'Unnamed: 0': 'int64', 'carat': 'float64', 'cut': 'object', 'color': 'object', 'clarity': 'object', 'depth': 'float64', 'table': 'float64', 'price': 'int64', 'x': 'float64', 'y': 'float64', 'z': 'float64'} <dataframe_Summary> {'Unnamed: 0': {'count': 53940.0, 'mean': 26970.5, 'std': 15571.281096942537, 'min': 1.0, '25%': 13485.75, '50%': 26970.5, '75%': 40455.25, 'max': 53940.0}, 'carat': {'count': 53940.0, 'mean': 0.7979397478680014, 'std': 0.4740112444054184, 'min': 0.2, '25%': 0.4, '50%': 0.7, '75%': 1.04, 'max': 5.01}, 'depth': {'count': 53940.0, 'mean': 61.749404894327036, 'std': 1.432621318833661, 'min': 43.0, '25%': 61.0, '50%': 61.8, '75%': 62.5, 'max': 79.0}, 'table': {'count': 53940.0, 'mean': 57.45718390804598, 'std': 2.2344905628213225, 'min': 43.0, '25%': 56.0, '50%': 57.0, '75%': 59.0, 'max': 95.0}, 'price': {'count': 53940.0, 'mean': 3932.799721913237, 'std': 3989.439738146379, 'min': 326.0, '25%': 950.0, '50%': 2401.0, '75%': 5324.25, 'max': 18823.0}, 'x': {'count': 53940.0, 'mean': 5.731157211716722, 'std': 1.1217607467924928, 'min': 0.0, '25%': 4.71, '50%': 5.7, '75%': 6.54, 'max': 10.74}, 'y': {'count': 53940.0, 'mean': 5.734525954764553, 'std': 1.1421346741235552, 'min': 0.0, '25%': 4.72, '50%': 5.71, '75%': 6.54, 'max': 58.9}, 'z': {'count': 53940.0, 'mean': 3.5387337782721544, 'std': 0.7056988469499941, 'min': 0.0, '25%': 2.91, '50%': 3.53, '75%': 4.04, 'max': 31.8}} <dataframe_info> RangeIndex: 53940 entries, 0 to 53939 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 53940 non-null int64 1 carat 53940 non-null float64 2 cut 53940 non-null object 3 color 53940 non-null object 4 clarity 53940 non-null object 5 depth 53940 non-null float64 6 table 53940 non-null float64 7 price 53940 non-null int64 8 x 53940 non-null float64 9 y 53940 non-null float64 10 z 53940 non-null float64 dtypes: float64(6), int64(2), object(3) memory usage: 4.5+ MB <some_examples> {'Unnamed: 0': {'0': 1, '1': 2, '2': 3, '3': 4}, 'carat': {'0': 0.23, '1': 0.21, '2': 0.23, '3': 0.29}, 'cut': {'0': 'Ideal', '1': 'Premium', '2': 'Good', '3': 'Premium'}, 'color': {'0': 'E', '1': 'E', '2': 'E', '3': 'I'}, 'clarity': {'0': 'SI2', '1': 'SI1', '2': 'VS1', '3': 'VS2'}, 'depth': {'0': 61.5, '1': 59.8, '2': 56.9, '3': 62.4}, 'table': {'0': 55.0, '1': 61.0, '2': 65.0, '3': 58.0}, 'price': {'0': 326, '1': 326, '2': 327, '3': 334}, 'x': {'0': 3.95, '1': 3.89, '2': 4.05, '3': 4.2}, 'y': {'0': 3.98, '1': 3.84, '2': 4.07, '3': 4.23}, 'z': {'0': 2.43, '1': 2.31, '2': 2.31, '3': 2.63}} <end_description>
1,436
0
2,480
1,436
129757782
<jupyter_start><jupyter_text>IMDB movies dataset The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies. Kaggle dataset identifier: imdb-movies-dataset <jupyter_script>import pyspark from pyspark.sql import SparkSession from pyspark.sql.functions import * spark = SparkSession.builder.appName("Movie").getOrCreate() df = spark.read.csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv", header=True) df.show(5) df = df.withColumn("Profit", col("revenue") - col("budget_x")) df = df.withColumn("Achievement", when(col("score") < 50, "Average").otherwise("Good")) df.createOrReplaceTempView("movie_released") var = spark.sql("select * from movie_released") var.show(5) df.select("names", "status", "date_x", "score", "Profit", "Achievement").show(20)
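# Hedged sketch (not part of the original notebook): aggregate the engineered columns on the
# same Spark DataFrame, e.g. average profit and score per release status. Column types are
# assumed to be numeric after Spark's implicit casts; pass inferSchema=True on read if not.
(
    df.groupBy("status")
    .agg(
        avg("Profit").alias("avg_profit"),
        avg("score").alias("avg_score"),
        count("*").alias("n_movies"),
    )
    .orderBy(col("avg_profit").desc())
    .show(10, truncate=False)
)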
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/757/129757782.ipynb
imdb-movies-dataset
ashpalsingh1525
[{"Id": 129757782, "ScriptId": 38433248, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8866968, "CreationDate": "05/16/2023 09:04:08", "VersionNumber": 1.0, "Title": "movies", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 26.0, "LinesInsertedFromPrevious": 26.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186113879, "KernelVersionId": 129757782, "SourceDatasetVersionId": 5552662}]
[{"Id": 5552662, "DatasetId": 3198793, "DatasourceVersionId": 5627422, "CreatorUserId": 13490345, "LicenseName": "Community Data License Agreement - Permissive - Version 1.0", "CreationDate": "04/28/2023 23:18:15", "VersionNumber": 1.0, "Title": "IMDB movies dataset", "Slug": "imdb-movies-dataset", "Subtitle": "Explore 10000+ movies worldwide with the IMDB Movies dataset", "Description": "The IMDB dataset contains information about movies, including their names, release dates, user ratings, genres, overviews, cast and crew members, original titles, production status, original languages, budgets, revenues, and countries of origin. This data can be used for various analyses, such as identifying trends in movie genres, exploring the relationship between budget and revenue, and predicting the success of future movies.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3198793, "CreatorUserId": 13490345, "OwnerUserId": 13490345.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5552662.0, "CurrentDatasourceVersionId": 5627422.0, "ForumId": 3263430, "Type": 2, "CreationDate": "04/28/2023 23:18:15", "LastActivityDate": "04/28/2023", "TotalViews": 19297, "TotalDownloads": 3999, "TotalVotes": 79, "TotalKernels": 10}]
[{"Id": 13490345, "UserName": "ashpalsingh1525", "DisplayName": "Ashpal Singh1525", "RegisterDate": "01/31/2023", "PerformanceTier": 2}]
import pyspark from pyspark.sql import SparkSession from pyspark.sql.functions import * spark = SparkSession.builder.appName("Movie").getOrCreate() df = spark.read.csv("/kaggle/input/imdb-movies-dataset/imdb_movies.csv", header=True) df.show(5) df = df.withColumn("Profit", col("revenue") - col("budget_x")) df = df.withColumn("Achievement", when(col("score") < 50, "Average").otherwise("Good")) df.createOrReplaceTempView("movie_released") var = spark.sql("select * from movie_released") var.show(5) df.select("names", "status", "date_x", "score", "Profit", "Achievement").show(20)
false
0
191
0
307
191
129389124
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") sample_submission_df = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) sample_submission_df.head() categorical_columns = train_df.select_dtypes(include=["object"]).columns print(categorical_columns) test_df.head(50) import pandas as pd from scipy.stats import skew import seaborn as sns import matplotlib.pyplot as plt def get_skewed_columns(df): skewed_columns = [] for column in df.columns: if df[column].dtype != "object": # Consider numerical columns only skewness = skew(df[column]) if skewness > 1 or skewness < -1: # Define a threshold for skewness skewed_columns.append(column) print("Columns with skewed distribution:") print(skewed_columns) get_skewed_columns(train_df) X = train_df.copy() from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() X["EJ"] = label_encoder.fit_transform(X["EJ"]) y_train = X.pop("Class") X_train = X.loc[:, X.columns != "Id"] X_test = test_df.loc[:, test_df.columns != "Id"] X_test["EJ"] = label_encoder.fit_transform(X_test["EJ"]) X_test.info() import lightgbm as lgb import numpy as np from sklearn.metrics import log_loss from sklearn.model_selection import StratifiedKFold # Assuming X_train, y_train, X_test are your training features, training labels, and test features respectively # Set parameters for LightGBM model params = { "objective": "binary", "metric": "binary_logloss", "verbosity": -1, "n_estimators": 1000, "learning_rate": 0.1, "max_depth": 6, "subsample": 0.8, "colsample_bytree": 0.8, "lambda": 1.0, } # Initialize arrays to store validation scores and test predictions validation_scores = [] test_prediction = np.zeros((len(X_test))) # Define the number of folds for cross-validation n_folds = 5 # Create the cross-validation strategy skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42) # Iterate over each fold for fold, (train_index, val_index) in enumerate(skf.split(X_train, y_train)): print(f"Training on Fold {fold + 1}") # Get the training and validation sets for this fold X_train_fold, X_val_fold = X_train.iloc[train_index], X_train.iloc[val_index] y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index] # Create LightGBM datasets for training and validation train_data = lgb.Dataset(X_train_fold, label=y_train_fold) val_data = lgb.Dataset(X_val_fold, label=y_val_fold) # Train the LightGBM model model = lgb.train( params, train_data, valid_sets=[train_data, val_data], early_stopping_rounds=100, verbose_eval=100, ) # Make predictions on the validation set val_pred = model.predict(X_val_fold) fold_logloss = log_loss(y_val_fold, 
val_pred) validation_scores.append(fold_logloss) # Make predictions on the test set for this fold # test_pred = model.predict(X_test) test_prediction += model.predict(X_test, num_iteration=model.best_iteration) print(f"Fold {fold + 1} log loss: {fold_logloss}") # Calculate the mean and standard deviation of validation scores mean_validation_score = np.mean(validation_scores) std_validation_score = np.std(validation_scores) print(f"\nMean validation log loss: {mean_validation_score}") print(f"Standard deviation of validation log loss: {std_validation_score}") # Average the predictions across all folds for the final test predictions test_prediction /= n_folds print(test_prediction) sample_submission = sample_submission_df.copy() sample_submission["class_1"] = test_prediction sample_submission["class_0"] = 1 - test_prediction sample_submission.to_csv(r"submission.csv", index=False) sample_submission.head()
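# Hedged sketch (not in the original notebook): gain-based feature importance from the last
# fold's booster, as a quick check on which columns drive the predictions. For a more stable
# view, one could accumulate these inside the fold loop instead.
importance = pd.DataFrame(
    {
        "feature": model.feature_name(),
        "gain": model.feature_importance(importance_type="gain"),
    }
).sort_values("gain", ascending=False)
print(importance.head(15))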
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389124.ipynb
null
null
[{"Id": 129389124, "ScriptId": 38470513, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1870127, "CreationDate": "05/13/2023 11:23:19", "VersionNumber": 1.0, "Title": "notebooke963155877", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 149.0, "LinesInsertedFromPrevious": 149.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") sample_submission_df = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) sample_submission_df.head() categorical_columns = train_df.select_dtypes(include=["object"]).columns print(categorical_columns) test_df.head(50) import pandas as pd from scipy.stats import skew import seaborn as sns import matplotlib.pyplot as plt def get_skewed_columns(df): skewed_columns = [] for column in df.columns: if df[column].dtype != "object": # Consider numerical columns only skewness = skew(df[column]) if skewness > 1 or skewness < -1: # Define a threshold for skewness skewed_columns.append(column) print("Columns with skewed distribution:") print(skewed_columns) get_skewed_columns(train_df) X = train_df.copy() from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() X["EJ"] = label_encoder.fit_transform(X["EJ"]) y_train = X.pop("Class") X_train = X.loc[:, X.columns != "Id"] X_test = test_df.loc[:, test_df.columns != "Id"] X_test["EJ"] = label_encoder.fit_transform(X_test["EJ"]) X_test.info() import lightgbm as lgb import numpy as np from sklearn.metrics import log_loss from sklearn.model_selection import StratifiedKFold # Assuming X_train, y_train, X_test are your training features, training labels, and test features respectively # Set parameters for LightGBM model params = { "objective": "binary", "metric": "binary_logloss", "verbosity": -1, "n_estimators": 1000, "learning_rate": 0.1, "max_depth": 6, "subsample": 0.8, "colsample_bytree": 0.8, "lambda": 1.0, } # Initialize arrays to store validation scores and test predictions validation_scores = [] test_prediction = np.zeros((len(X_test))) # Define the number of folds for cross-validation n_folds = 5 # Create the cross-validation strategy skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42) # Iterate over each fold for fold, (train_index, val_index) in enumerate(skf.split(X_train, y_train)): print(f"Training on Fold {fold + 1}") # Get the training and validation sets for this fold X_train_fold, X_val_fold = X_train.iloc[train_index], X_train.iloc[val_index] y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index] # Create LightGBM datasets for training and validation train_data = lgb.Dataset(X_train_fold, label=y_train_fold) val_data = lgb.Dataset(X_val_fold, label=y_val_fold) # Train the LightGBM model model = lgb.train( params, train_data, valid_sets=[train_data, val_data], early_stopping_rounds=100, verbose_eval=100, ) # Make predictions on the validation set val_pred = model.predict(X_val_fold) fold_logloss = log_loss(y_val_fold, 
val_pred) validation_scores.append(fold_logloss) # Make predictions on the test set for this fold # test_pred = model.predict(X_test) test_prediction += model.predict(X_test, num_iteration=model.best_iteration) print(f"Fold {fold + 1} log loss: {fold_logloss}") # Calculate the mean and standard deviation of validation scores mean_validation_score = np.mean(validation_scores) std_validation_score = np.std(validation_scores) print(f"\nMean validation log loss: {mean_validation_score}") print(f"Standard deviation of validation log loss: {std_validation_score}") # Average the predictions across all folds for the final test predictions test_prediction /= n_folds print(test_prediction) sample_submission = sample_submission_df.copy() sample_submission["class_1"] = test_prediction sample_submission["class_0"] = 1 - test_prediction sample_submission.to_csv(r"submission.csv", index=False) sample_submission.head()
false
0
1,418
2
1,418
1,418
129389452
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# ## Load Preprocessed data
tr = pd.read_csv("/kaggle/input/dummy-hp/train_proc.csv")
ts = pd.read_csv("/kaggle/input/dummy-hp/test_proc.csv")

# ## Frequency subsampling
# Recurrent and autoregressive networks need a constant sampling frequency. That is not the case here: there are missing values, so I simply interpolate them linearly.
num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9


def clean_freq(timestamp, series):
    # Detect wrong frequencies
    diffs = np.diff(timestamp.astype(np.int64))
    median = np.median(diffs)
    bad = np.where(diffs != median)[0]
    if len(bad) == 0:
        return timestamp, series
    # Add missing samples by linear interpolation
    for b in bad:
        ratio = int(diffs[b] / median)
        assert ratio * median == diffs[b], "Not integer frequency"
        timestamp = np.hstack(
            (timestamp, np.linspace(timestamp[b], timestamp[b + 1], ratio + 1)[1:-1])
        )
        series = np.hstack(
            (series, np.linspace(series[b], series[b + 1], ratio + 1)[1:-1])
        )
    # Sort again
    timestamp, series = np.array(sorted(zip(timestamp, series))).transpose()
    # Check everything is alright
    diffs = np.diff(timestamp.astype(np.int64))
    bad = np.where(diffs != np.median(diffs))[0]
    assert len(bad) == 0, "Wrong processing"
    return timestamp, series


timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())

# ### Example of inventory for one product
plt.plot(timestamp, series)
plt.show()

# Check every series is well behaved
for num in pd.unique(tr["product_number"]):
    subset = tr[tr["product_number"] == num]
    timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
    try:
        timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
    except Exception:
        print(num)

# # Gated Recurrent Units
import torch
import torch.nn as nn
import torch.optim as optim

# Hyperparameters
input_size = 1
hidden_size = 100
output_size = 1
seq_length = 10
batch_size = 5
train_ratio = 0.5

num = 9870
subset = tr[tr["product_number"] == num]
timestamp = pd.to_datetime(subset["date"]).values.astype(np.int64) // 10**9
series = subset["inventory_units"].to_numpy()
timestamp, series = clean_freq(timestamp, subset["inventory_units"].to_numpy())
data = np.vstack((timestamp, series)).transpose()
train_data = data[: int(len(data) * train_ratio)]
val_data = data[int(len(data) * train_ratio) :]

# ### Train / Validation data
plt.plot(train_data[:, 0], train_data[:, 1])
plt.plot(val_data[:, 0], val_data[:, 1], c="r")
plt.show()


def create_data_loader(data, seq_length, batch_size):
    inputs = []
    targets = []
    for i in range(len(data) - seq_length):
        inputs.append(data[i : i + seq_length, 1])
        targets.append(data[i + seq_length, 1])
    inputs = torch.tensor(np.array(inputs)).float().view(-1, seq_length, input_size)
    targets = torch.tensor(np.array(targets)).float().view(-1, output_size)
    dataset = torch.utils.data.TensorDataset(inputs, targets)
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False
    )
    return data_loader


train_loader = create_data_loader(train_data, seq_length, batch_size)
val_loader = create_data_loader(val_data, seq_length, batch_size)


# Define GRU model
class GRUModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(GRUModel, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.gru = nn.GRU(input_size, hidden_size)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        out, hidden = self.gru(x, hidden)
        out = self.fc(out[:, -1, :])
        return out, hidden

    def init_hidden(self, seq_length):
        return torch.zeros(self.input_size, seq_length, self.hidden_size)


# Initialize model, criterion, and optimizer
model = GRUModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)


# Training loop
def train(model, train_loader, criterion, optimizer, device):
    model.train()
    hidden = model.init_hidden(seq_length).to(device)
    total_loss = 0
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        hidden = hidden.detach()
        optimizer.zero_grad()
        outputs, hidden = model(inputs, hidden)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # Average over the training batches (the original divided by the validation loader length)
    return total_loss / len(train_loader)


# Validation loop
def validate(model, val_loader, criterion, device):
    model.eval()
    hidden = model.init_hidden(seq_length).to(device)
    total_loss = 0
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs, hidden = model(inputs, hidden)
            loss = criterion(outputs, targets)
            total_loss += loss.item()
    return total_loss / len(val_loader)


# ### Training loop
# As you can see, there is a huge difference between the training and validation loss.
# Main training and validation loop
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 500
for epoch in range(num_epochs):
    tr_loss = train(model, train_loader, criterion, optimizer, device)
    val_loss = validate(model, val_loader, criterion, device)
    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {tr_loss:.4f}")
        print(f"Epoch [{epoch+1}/{num_epochs}], Validation Loss: {val_loss:.4f}")
print("Training and validation completed.")

# ## Predictions
# There are two ways of predicting with a GRU. One is to feed the real previous samples; the other is to run the model in an autoregressive manner. The latter is clearly the only one available at runtime, but it is interesting to have both to compare.


def predict(model, loader, device):
    model.eval()
    hidden = model.init_hidden(seq_length).to(device)
    outputs = []
    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            output, hidden = model(inputs, hidden)
            outputs.append(output.cpu().numpy()[0][0])
    return np.array(outputs)


def predict_autoreg(model, device, steps, initial_inp):
    model.eval()
    hidden = model.init_hidden(seq_length).to(device)
    outputs = []
    inputs = initial_inp.to(device)
    with torch.no_grad():
        for k in range(steps):
            output, hidden = model(inputs, hidden)
            next_val = output.cpu().numpy()[0][0]
            outputs.append(next_val)
            # Shift the input window one step and append the new prediction
            aux = np.empty(inputs.shape)
            aux[:, :-1, :] = inputs[:, 1:, :].cpu().numpy()
            aux[:, -1, :] = next_val
            inputs = torch.tensor(aux).float().to(device)
    return np.array(outputs)


train_loader_pred = create_data_loader(train_data, seq_length, 1)
val_loader_pred = create_data_loader(val_data, seq_length, 1)
preds_tr = predict(model, train_loader_pred, device)
preds_val = predict(model, val_loader_pred, device)
preds_reg = predict_autoreg(
    model, device, len(val_data), list(train_loader_pred)[-1][0]
)
preds_reg_tr = predict_autoreg(
    model, device, len(train_data), list(train_loader_pred)[0][0]
)
plt.plot(train_data[:, 0], train_data[:, 1], label="Training")
plt.plot(train_data[seq_length:, 0], preds_tr, c="g", label="Predictions training")
plt.plot(
    train_data[:, 0], preds_reg_tr, c="k", label="Predictions training autoregressive"
)
plt.plot(val_data[:, 0], val_data[:, 1], c="r", label="Validation")
plt.plot(val_data[seq_length:, 0], preds_val, c="g", label="Predictions validation")
plt.plot(
    val_data[:, 0], preds_reg, c="k", label="Predictions validation autoregressive"
)
plt.legend(loc="best")
plt.show()
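# Closing note on the training loop above: the validation loss stays far above the training loss, which points to overfitting.
# A minimal early-stopping variant of that loop is sketched below. It only reuses the train/validate helpers, model, loaders, criterion, optimizer and device defined earlier; the patience value and the checkpoint filename are assumptions, not part of the original notebook.
best_val = float("inf")
patience, bad_epochs = 20, 0  # assumed patience
for epoch in range(num_epochs):
    tr_loss = train(model, train_loader, criterion, optimizer, device)
    val_loss = validate(model, val_loader, criterion, device)
    if val_loss < best_val:
        best_val = val_loss
        bad_epochs = 0
        torch.save(model.state_dict(), "best_gru.pt")  # hypothetical checkpoint path
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            print(f"Stopping early at epoch {epoch + 1}, best validation loss {best_val:.4f}")
            break
# The best weights can then be restored with model.load_state_dict(torch.load("best_gru.pt")) before predicting.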
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389452.ipynb
null
null
[{"Id": 129389452, "ScriptId": 38472148, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2424396, "CreationDate": "05/13/2023 11:26:49", "VersionNumber": 1.0, "Title": "GRU example", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 243.0, "LinesInsertedFromPrevious": 243.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,458
0
2,458
2,458
129389501
# # Regression Model for car data
# # 1. About Dataset
# Variable Name: Description
# Car_Name: Name of the car
# Year: Year in which the car was bought
# Selling_Price: Price at which the car is being sold
# Present_Price: Present price of the car
# Kms_Driven: Number of kilometres the car has been driven
# Fuel_Type: Fuel type of the car
# Seller_Type: Tells if the seller is an individual or a dealer
# Transmission: Gear transmission of the car (manual / automatic)
# Owner: Number of previous owners of the car
# # 2. Dataset and data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from warnings import filterwarnings

filterwarnings("ignore")
data = pd.read_csv("/kaggle/input/cardekho/car data (1).csv")
df = pd.DataFrame(data)
df
# After reading the data we find 8 features, of which I drop one (Car_Name); Selling_Price is the target.
# Next we have to check the data for missing values and noise, which we can do with describe and isna.
df.describe()
# Check for missing values
nans = df[df.isna().any(axis=1)]
print(f"Total rows with NaNs: {nans.shape[0]}\n")
df.info()
# remove the Car_Name column from the dataset
df1 = df.drop("Car_Name", axis="columns")
# After removing the Car_Name column, insert an Age column derived from the Year column
# define Age from the Year column and insert it
Age = abs(df1.Year - 2019)
df1.insert(1, "Age", Age)
df1.nunique()
df1
print(df["Fuel_Type"].unique())
print(df["Seller_Type"].unique())
print(df["Transmission"].unique())
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Petrol", 2)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("Diesel", 3)
df1["Fuel_Type"] = df1["Fuel_Type"].replace("CNG", 4)
df1["Seller_Type"] = df1["Seller_Type"].replace("Dealer", 2)
df1["Seller_Type"] = df1["Seller_Type"].replace("Individual", 3)
df1["Transmission"] = df1["Transmission"].replace("Manual", 2)
df1["Transmission"] = df1["Transmission"].replace("Automatic", 3)
# Looking at the data info, we find three columns with object type, so we have to encode them numerically.
# The encoding is as follows:
# Fuel_Type ===>> Petrol == 2, Diesel == 3, CNG == 4
# Seller_Type ===>> Dealer == 2, Individual == 3
# Transmission ==>> Manual == 2, Automatic == 3
df2 = df1.drop(columns=["Year"], axis="columns")
df2
# Checking the correlation of all the inputs
pear_corr = df2.corr(method="pearson")
pear_corr.style.background_gradient(cmap="Greens")
# As we can see, Selling_Price and Present_Price are well correlated; so are Kms_Driven and Age.
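# To make the observation above concrete, the correlations with the target can be ranked explicitly.
# This small sketch only reuses the pear_corr matrix computed above; nothing new is assumed.
target_corr = pear_corr["Selling_Price"].drop("Selling_Price")
print(target_corr.abs().sort_values(ascending=False))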
# for checking noise I draw scatter for all features for column in df2.drop(columns=["Selling_Price"]).columns: plt.figure(figsize=(10, 5)) plt.scatter(df2[column], df2.Selling_Price, alpha=0.5) plt.title( column + " & Selling_Price", backgroundcolor="green", color="white", fontsize=15 ) plt.xlabel(column, fontsize=20) plt.ylabel("Selling_Price", fontsize=20) plt.grid() plt.show() plt.figure(figsize=(10, 5)) plt.scatter(df2["Fuel_Type"], df2.Selling_Price, alpha=0.5) plt.xlabel("Fuel_Type", fontsize=20) plt.ylabel("Selling_Price", fontsize=20) plt.annotate( "maybe noise", xy=(4, 4), xytext=(3.75, 10), arrowprops=dict(facecolor="red", shrink=0.05), fontsize=20, ) plt.grid() plt.show() plt.figure(figsize=(10, 5)) plt.scatter(df2["Owner"], df2.Selling_Price, alpha=0.5) plt.xlabel("Owner", fontsize=20) plt.ylabel("Selling_Price", fontsize=20) plt.annotate( "maybe noise", xy=(3, 3), xytext=(2.75, 6), arrowprops=dict(facecolor="red", shrink=0.05), fontsize=20, ) plt.grid() plt.show() plt.figure(figsize=(10, 5)) plt.scatter(df2["Present_Price"], df2.Selling_Price, alpha=0.5) plt.xlabel("Present_Price", fontsize=20) plt.ylabel("Selling_Price", fontsize=20) plt.annotate( "maybe noise", xy=(92, 35), xytext=(90, 28), arrowprops=dict(facecolor="red", shrink=0.05), fontsize=20, ) plt.grid() plt.show() plt.figure(figsize=(10, 5)) plt.scatter(df2["Kms_Driven"], df2.Selling_Price, alpha=0.5) plt.xlabel("Kms_Driven", fontsize=20) plt.ylabel("Selling_Price", fontsize=20) plt.annotate( "maybe noise", xy=(500000, 0), xytext=(500000, 5), arrowprops=dict(facecolor="red", shrink=0.05), fontsize=20, ) plt.grid() plt.show() # univariate analysis of categorical data: import seaborn as sns classification = ["Fuel_Type", "Seller_Type", "Transmission", "Owner"] sns.set_palette("summer_r") for i, col in enumerate(classification): fig, axes = plt.subplots(1, 2, figsize=(10, 4)) # count of col (countplot) sns.countplot(data=df, x=col, ax=axes[0]) for container in axes[0].containers: axes[0].bar_label(container) # count of col (pie chart) slices = df[col].value_counts().sort_index().values activities = [var for var in df[col].value_counts().sort_index().index] axes[1].pie(slices, labels=activities, shadow=True, autopct="%1.1f%%") plt.suptitle(col, backgroundcolor="black", color="white", fontsize=15) plt.show() numerical = ["Age", "Selling_Price", "Present_Price", "Kms_Driven"] i = 0 sns.set_palette("summer_r") while i < 4: fig = plt.figure(figsize=(10, 4)) plt.subplot(1, 2, 1) sns.boxplot(x=numerical[i], data=df1) plt.title(numerical[i]) plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15) i += 1 plt.subplot(1, 2, 2) sns.boxplot(x=numerical[i], data=df1) plt.title(numerical[i]) plt.title(numerical[i], backgroundcolor="black", color="white", fontsize=15) i += 1 plt.show() sns.set_style("darkgrid") fig, ax = plt.subplots(2, 2, figsize=(16, 12)) i = 0 j = 0 for idx, col in enumerate(numerical): i = idx // 2 j = idx % 2 sns.histplot(x=col, data=df2, stat="frequency", ax=ax[i, j]) sns.kdeplot(x=col, data=df2, color="purple", ax=ax[i, j], linewidth=3) ax[i, j].axvline( x=df2[col].mean(), color="r", ls="--", label="Mean value", linewidth=2 ) ax[i, j].legend() ax[i, j].set_xlabel(col, fontsize=12) plt.suptitle("Continuous Feature Frequency", size=25, y=1.02, fontweight="bold") plt.tight_layout() plt.show() # remove 4 data which are noises df3 = df2[df2["Present_Price"] < 80.00] df3 = df3[df3["Kms_Driven"] < 300000.000000] df3 = df3[df3["Fuel_Type"] < 4] df3 = df3[df3["Owner"] < 3] df3.describe() # # # 3. 
model # # Regression Model from sklearn import preprocessing # normalizing scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) df4 = df3.drop(columns=["Selling_Price"]) norm = scaler.fit_transform(df4) norm_df = pd.DataFrame( norm, columns=[ "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) x = pd.DataFrame( norm_df, columns=[ "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) y = df3["Selling_Price"].values.reshape(-1, 1) x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=101 ) regressor = LinearRegression() regressor.fit(x_train, y_train) y_pred = regressor.predict(x_test) print(regressor.intercept_) print(regressor.coef_) # for 20% test of data print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred)) print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred)) print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print("R2 Score:", metrics.r2_score(y_test, y_pred)) regressor.score(x, y) compare = pd.DataFrame({"actual": y_test.flatten(), "prediction": y_pred.flatten()}) compare # plot predicted vs actual sns.set_palette("dark:#5A9_r") plt.figure(figsize=(10, 5)) # sns.regplot(compare['actual'],compare['prediction'] , line_kws={'color':'r', 'alpha':0.8, # 'linestyle':'--', 'linewidth':2}, # scatter_kws={'alpha':0.5}) sns.jointplot(data=compare, x="actual", y="prediction", kind="reg") plt.ylim(0, 25) plt.xlabel("Actual Values") plt.ylabel("prediction Values") plt.show() import scipy.stats as stats print("Pearson R: ", stats.pearsonr(compare["prediction"], compare["actual"])) a = x_train.Present_Price b = y_train c = x_test.Present_Price d = y_test plt.scatter(a, b) plt.scatter(c, d) x_test.insert(3, "y_test", y_test) x_test.insert(4, "y_pred", y_pred) A = x_test.sort_values(by=["Age"]) plt.scatter(A.Age, y_test) plt.plot(A.Age, y_pred, color="red") # insert column with ^2 for some feature present2 = df3.Present_Price**2 owner2 = df3.Owner**2 Kms2 = df3.Kms_Driven**2 Fuel2 = df3.Fuel_Type**2 df3.insert(1, "present2", present2) df3.insert(1, "Kms2", Kms2) df3.insert(1, "Fuel2", Fuel2) scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) df4 = df3.drop(columns=["Selling_Price"]) norm = scaler.fit_transform(df4) norm_df = pd.DataFrame( norm, columns=[ "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) x = pd.DataFrame( norm_df, columns=[ "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) y = df3["Selling_Price"].values.reshape(-1, 1) x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=101 ) regressor = LinearRegression() regressor.fit(x_train, y_train) y_pred = regressor.predict(x_test) # for power 2 test of data print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred)) print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred)) print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print("R2 Score:", metrics.r2_score(y_test, y_pred)) regressor.score(x, y) # insert column km*age AK = df3.Kms_Driven * df3.Age df3.insert(1, "AK", AK) scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) df4 = df3.drop(columns=["Selling_Price"]) norm = scaler.fit_transform(df4) norm_df = pd.DataFrame( norm, columns=[ "AK", "Fuel2", "Kms2", "present2", 
"Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) x = pd.DataFrame( norm_df, columns=[ "AK", "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) y = df3["Selling_Price"].values.reshape(-1, 1) x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=101 ) regressor = LinearRegression() regressor.fit(x_train, y_train) y_pred = regressor.predict(x_test) print(regressor.intercept_) print(regressor.coef_) # for power 2& 3 and multiple km&age of data print("Mean Absolute Error :", metrics.mean_absolute_error(y_test, y_pred)) print("Mean Squared Error :", metrics.mean_squared_error(y_test, y_pred)) print("Root Mean Squared Error :", np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print("R2 Score:", metrics.r2_score(y_test, y_pred)) regressor.score(x, y) # now add new x and normalizing again for predict new y # adding new x for predict y new and normalizing again df_new = pd.DataFrame( { "AK": [420000], "Fuel2": [4], "Kms2": [42000 * 42000], "present2": [126.1129], "Selling_Price": [1], "Present_Price": [11.23], "Kms_Driven": [42000], "Age": [10], "Owner": [1], "Fuel_Type": [2], "Seller_Type": [2], "Transmission": [2], } ) df_new1 = df3.append(df_new) scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) df5 = df_new1.drop(columns=["Selling_Price"]) norm1 = scaler.fit_transform(df5) norm_df1 = pd.DataFrame( norm1, columns=[ "AK", "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ], ) x_train = norm_df1[ [ "AK", "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ] ][:296] y_train = df_new1["Selling_Price"][:296].values.reshape(-1, 1) regressor.fit(x_train, y_train) x_test = norm_df1[ [ "AK", "Fuel2", "Kms2", "present2", "Present_Price", "Kms_Driven", "Age", "Owner", "Fuel_Type", "Seller_Type", "Transmission", ] ][296:] y_pred = regressor.predict(x_test) y_pred
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389501.ipynb
null
null
[{"Id": 129389501, "ScriptId": 38469068, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12941081, "CreationDate": "05/13/2023 11:27:27", "VersionNumber": 1.0, "Title": "Regression model for car data", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 379.0, "LinesInsertedFromPrevious": 379.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 44}]
null
null
null
null
false
0
4,630
44
4,630
4,630
129389272
# # Sales forecasting # The project is mainly to predict the future sales by using the time-series forecasting technique. # # Import Dependencies import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats from scipy.stats import pearsonr import itertools from sklearn import preprocessing from statsmodels.tsa.stattools import kpss import statsmodels.api as sm from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error # # Datasets train_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv") test_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv") oil_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv") transaction_df = pd.read_csv( "/kaggle/input/store-sales-time-series-forecasting/transactions.csv" ) stores_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/stores.csv") holiday_event_df = pd.read_csv( "/kaggle/input/store-sales-time-series-forecasting/holidays_events.csv" ) # # Understanding the Data train_df.head() test_df.head() oil_df.head() transaction_df.head() stores_df.head() holiday_event_df.head() # The sales column is the target variable. # ## Merging datasets holiday_event_df, stores_df, oil_df & train_df train_df = train_df.merge(stores_df, on="store_nbr") train_df = train_df.merge(oil_df, on="date", how="left") holiday_event_df = holiday_event_df.rename(columns={"type": "holiday_type"}) train_df = train_df.merge(holiday_event_df, on="date", how="left") train_df.head(3) train_df.info() # # Missing Values Detection train_df.isnull().sum() # percentage of missing values in train_df missing_percentages = train_df.isnull().sum() / len(train_df) * 100 print(missing_percentages) # remove columns that are having more than 30% missing values columns_to_delete = missing_percentages[missing_percentages > 30].index train_df = train_df.drop(columns=columns_to_delete) train_df.info() # # Duplicates # train_df.duplicated().any() dupes = train_df.duplicated() # dupes sum(dupes) # dropping duplicate values train_df = train_df.drop_duplicates() train_df train_df.duplicated().any() test_df.duplicated().any() # # Check if there still any missing values present in the train_df # train_df.isnull().sum() # # Calculate count, mean, std, min, 25%, 50%, 75%, max values for each column. Prepare an analysis of the difference between mean and median for each column and possible reasons for the same. train_df.describe() # # EDA train_df.info() # ## Questions # 1. Does the type of stores affect the store sales? # 2. Which family is having the highest sales? # 3. Does promotion able to improve the sales? # 4. Which city is having the most number of customers? # 5. Which state is having the most number of customers? # 6. Which of the stores has the highest sales. # 7. Which month is having the most sales, and least sales. # ### 1. Does the type of stores affect the store sales? # To answer the first question 'Does the type of stores affect the store sales?' , i will use ANOVA test. # ANOVA (Analysis of Variance) is a statistical test used to determine whether there are significant differences between the means of two or more groups. It compares the variation between the groups (due to the different categories or factors) to the variation within the groups. # H0 (>0.05)= The type of stores does not affect store sales. There is no significant difference in store sales between different types of stores. 
# H1 (<0.05)= The type of stores does affect store sales. There is a significant difference in store sales between different types of stores.
#
grouped_data = train_df.groupby("type")["sales"]
# Perform the ANOVA test
f_statistic, p_value = stats.f_oneway(
    *[grouped_data.get_group(store_type) for store_type in grouped_data.groups]
)
# Print the results
print("F-Statistic:", f_statistic)
print("p-value:", p_value)
# Based on the F-statistic and p-value above, we reject the null hypothesis and accept the alternative hypothesis. Hence, the type of store does affect store sales: there is a significant difference in store sales between the different types.
# Sales Vs Type
plt.scatter(train_df["type"], train_df["sales"])
plt.ylabel("sales")
plt.xlabel("type")
plt.show()
# ### 2. Which family is having the highest sales?
# Pie chart
# Group the data by family and calculate the total sales for each family
family_sales = train_df.groupby("family")["sales"].sum()
# Sort the families based on sales in descending order
family_sales_sorted = family_sales.sort_values(ascending=False)
# Get the top 5 families with the highest sales
top_families = family_sales_sorted.head(5)
# Create the pie chart
plt.pie(top_families, labels=top_families.index, autopct="%1.1f%%", startangle=90)
plt.title("Distribution of Sales by Family")
plt.axis("equal")
plt.show()
# Based on the pie chart above, GROCERY I has the highest sales, and BEVERAGES comes second.
# ### 3. Does promotion able to improve the sales?
# To answer the 3rd question "Does promotion able to improve the sales?" I will use the Pearson correlation test to determine the relationship between the two variables, as both variables are numerical. The Pearson correlation coefficient measures the linear relationship between two continuous variables and ranges from -1 to +1.
# H0 (>0.05)= The promotion does not affect store sales.
# H1 (<0.05)= The promotion does affect store sales.
#
correlation, p_value = pearsonr(train_df["onpromotion"], train_df["sales"])
print("Pearson correlation coefficient:", correlation)
print("p-value:", p_value)
# Based on the Pearson correlation coefficient of 0.4279 and the p-value of 0.0, we can reject the null hypothesis (H0) and conclude that there is a significant relationship between promotion and store sales. Therefore, the promotion does affect store sales.
# Scatter plot
plt.scatter(train_df["onpromotion"], train_df["sales"])
plt.xlabel("Promotion")
plt.ylabel("Sales")
plt.title("Promotion vs Sales")
plt.show()
# ### 4. Which city is having the most number of customers?
# Count Plot
# Create a count plot
plt.figure(figsize=(10, 6))  # Set the figure size
sns.countplot(data=train_df, x="city")
plt.xlabel("City")
plt.ylabel("Count")
plt.title("Sales Distribution by City")
plt.xticks(rotation=45)
plt.show()
# Based on the count plot above, the city of Quito has the most sales.
# ### 5. Which state is having the most number of customers?
#
# Count Plot
# Create a count plot
plt.figure(figsize=(10, 6))  # Set the figure size
sns.countplot(data=train_df, x="state")
plt.xlabel("State")
plt.ylabel("Count")
plt.title("Sales Distribution by State")
plt.xticks(rotation=45)
plt.show()
# Based on the count plot above, the state of Pichincha has the most sales compared to the other states.
# ### 6. Which of the stores has the highest sales.
# Calculate the total sales for each store
store_sales = train_df.groupby("store_nbr")["sales"].sum().reset_index()
# Sort the stores based on sales in descending order
store_sales = store_sales.sort_values("sales", ascending=False)
# Create a bar plot
plt.figure(figsize=(12, 6))
sns.barplot(data=store_sales, x="store_nbr", y="sales")
plt.xlabel("Store Number")
plt.ylabel("Total Sales")
plt.title("Total Sales by Store")
plt.xticks(rotation=45)
plt.show()
# ### 7. Which month is having the most sales, and least sales.
# First convert the 'date' from object to date time
train_df["date"] = pd.to_datetime(train_df["date"])
# create new columns 'month' 'year'
train_df["month"] = train_df["date"].dt.month
train_df["year"] = train_df["date"].dt.year
train_df.head(7)
# Group the data by month, year, and calculate the total sales
monthly_sales = train_df.groupby(["month", "year"])["sales"].sum().reset_index()
# Create the line chart
plt.figure(figsize=(10, 6))  # Set the figure size
# Get unique years and cycle through colors
years = monthly_sales["year"].unique()
colors = itertools.cycle(["red", "green", "blue", "orange", "purple"])
for year in years:
    year_data = monthly_sales[monthly_sales["year"] == year]
    plt.plot(
        year_data["month"],
        year_data["sales"],
        marker="o",
        color=next(colors),
        label=str(year),
    )
plt.xlabel("Month")
plt.ylabel("Sales")
plt.title("Monthly Sales Trend")
# Customize x-axis ticks to show month names
month_names = [
    "Jan",
    "Feb",
    "Mar",
    "Apr",
    "May",
    "Jun",
    "Jul",
    "Aug",
    "Sep",
    "Oct",
    "Nov",
    "Dec",
]
plt.xticks(range(1, 13), month_names)
plt.legend()
plt.show()
# Overall, the orange line (2016) shows consistently high sales from January to December; within 2016, December had the most sales. On the other hand, compared with the other years, 2013 had the lowest overall sales, especially during February.
train_df = train_df.groupby("date")[["sales", "onpromotion"]].sum().reset_index()
print(train_df)
# # Autocorrelation
# Autocorrelation measures the correlation between a time series and its lagged values. Autocorrelation plots (ACF) and partial autocorrelation plots (PACF) help identify significant lag values and potential autoregressive or moving average components.
# - If the autocorrelation value is close to 1 or -1, it indicates a strong positive or negative autocorrelation, respectively.
# - If the autocorrelation value is close to 0, it indicates a weak or no autocorrelation.
sales_series = train_df["sales"]
autocorr_values = sales_series.autocorr()
print("Autocorrelation:", autocorr_values)
# Based on the result above, since the autocorrelation value is close to 1 (0.766), it suggests that there is a positive autocorrelation. A positive autocorrelation indicates that there is a relationship between the current sales values and the previous sales values.
from statsmodels.graphics.tsaplots import plot_acf

plot_acf(train_df["sales"])
from statsmodels.graphics.tsaplots import plot_pacf
import matplotlib.pyplot as plt

# Plot the PACF
fig, ax = plt.subplots(figsize=(10, 6))
plot_pacf(train_df["sales"], ax=ax)
plt.xlabel("Lag")
plt.ylabel("Partial Autocorrelation")
plt.title("Partial Autocorrelation Function (PACF)")
plt.show()
# # Stationarity Test
# There are various statistical tests to check stationarity, including the Augmented Dickey-Fuller (ADF) test and the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test.
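# Before the formal tests, a quick visual check is to plot the rolling mean and standard deviation of the series; if they drift over time the series is unlikely to be stationary.
# The sketch below only reuses the aggregated daily train_df from above; the 30-day window is an assumption.
rolling_mean = train_df["sales"].rolling(window=30).mean()
rolling_std = train_df["sales"].rolling(window=30).std()
plt.figure(figsize=(10, 6))
plt.plot(train_df["sales"], label="Sales")
plt.plot(rolling_mean, label="30-day rolling mean")
plt.plot(rolling_std, label="30-day rolling std")
plt.legend(loc="best")
plt.title("Rolling Mean and Standard Deviation")
plt.show()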
# ### Augmented Dickey-Fuller (ADF) test # The Augmented Dickey-Fuller (ADF) test is a statistical test used to determine whether a time series is stationary or non-stationary. Stationarity is an important assumption in many time series analysis models. # The ADF test evaluates the null hypothesis that the time series has a unit root, indicating non-stationarity. The alternative hypothesis is that the time series is stationary. # When performing the ADF test, we obtain the ADF statistic and the p-value. The ADF statistic is a negative number and the more negative it is, the stronger the evidence against the null hypothesis. The p-value represents the probability of observing the ADF statistic or a more extreme value if the null hypothesis were true. A low p-value (below a chosen significance level, typically 0.05) indicates strong evidence against the null hypothesis and suggests that the time series is stationary. ts = train_df["sales"] import pandas as pd from statsmodels.tsa.stattools import adfuller # Perform the ADF test result = adfuller(ts) # Extract and print the test statistics and p-value adf_statistic = result[0] p_value = result[1] print("ADF Statistic:", adf_statistic) print("p-value:", p_value) # The ADF statistic of -2.616195748604853 suggests that there is some evidence against the null hypothesis of non-stationarity in the time series. However, the p-value of 0.08969592175787544 indicates that this evidence is not statistically significant at a conventional significance level of 0.05. # In simpler terms, the ADF test indicates that there may be some stationarity in the time series data, but it is not strong enough to conclude with certainty. The p-value suggests that the observed results could occur by chance under the assumption of non-stationarity. Therefore, further analysis and modeling techniques may be necessary to better understand the stationarity of the data. # ### Kwiatkowski-Phillips-Schmidt-Shin (KPSS) # The Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test is another statistical test used to assess the stationarity of a time series. It is complementary to the Augmented Dickey-Fuller (ADF) test. # The KPSS test evaluates the null hypothesis that the time series is stationary against the alternative hypothesis of non-stationarity. Unlike the ADF test, which assumes the presence of a unit root, the KPSS test assumes the absence of a unit root. # The test calculates the KPSS statistic, which measures the cumulative sum of squared deviations from the mean in the series. It also provides a p-value that indicates the probability of observing the KPSS statistic or a more extreme value under the null hypothesis. # Interpreting the results of the KPSS test involves considering the KPSS statistic and the associated p-value. If the KPSS statistic is greater than the critical value at a chosen significance level (e.g., 0.05), it provides evidence against the null hypothesis of stationarity. Conversely, if the KPSS statistic is smaller than the critical value, it suggests that the time series is stationary. result = kpss(ts) # Extract and print the test statistic and p-value kpss_statistic = result[0] p_value = result[1] print("KPSS Statistic:", kpss_statistic) print("p-value:", p_value) # The KPSS test result suggests that the time series is likely non-stationary. The KPSS statistic value of 5.737661236232327 exceeds the critical value, and the p-value of 0.01 is smaller than the significance level of 0.05. 
Therefore, we reject the null hypothesis of stationarity, indicating the presence of a trend or non-constant variance in the time series. # # Autoregressive Integrated Moving Average Model (ARIMA) model p = 2 d = 1 q = 1 train_np = train_df["sales"].values.astype("float64") model = sm.tsa.ARIMA(train_np, order=(p, d, q)) result = model.fit() # Print the model summary print(result.summary()) # Make predictions start_idx = len(train_np) end_idx = len(train_np) + len(test_df) - 1 predictions = result.predict(start=start_idx, end=end_idx) # Print the predictions print(predictions) from statsmodels.tsa.statespace.sarimax import SARIMAX from sklearn.metrics import mean_absolute_error, mean_squared_error y_train = train_df["sales"] X_train = train_df["onpromotion"] # Define and fit the SARIMAX model model = SARIMAX(y_train, exog=X_train, order=(1, 0, 1), seasonal_order=(1, 0, 1, 7)) model_fit = model.fit() # Make predictions on the training data y_pred = model_fit.predict( start=train_df.index[0], end=train_df.index[-1], exog=X_train ) # Compute mean absolute error and mean squared error mae = mean_absolute_error(y_train, y_pred) mse = mean_squared_error(y_train, y_pred) print("MAE:", mae) print("MSE:", mse) # # Submission # submission = pd.DataFrame() submission["id"] = test_df.index submission["sales"] = np.zeros(len(test_df)) # save the submission file as a CSV file submission.to_csv("mysubmission.csv", index=False)
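# The submission above is filled with zeros as a placeholder. A minimal sketch of producing an actual out-of-sample forecast from the fitted SARIMAX is shown below; it assumes future onpromotion values are simply zero (a hypothetical placeholder), and note that this forecasts total daily sales, so mapping it back to the per-store/family rows of the submission is still left open.
h = test_df["date"].nunique()  # number of future days in the test window
future_promo = np.zeros((h, 1))  # hypothetical: assume no promotions ahead
forecast = model_fit.forecast(steps=h, exog=future_promo)
print(forecast.head())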
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/389/129389272.ipynb
null
null
[{"Id": 129389272, "ScriptId": 38472204, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11491103, "CreationDate": "05/13/2023 11:24:50", "VersionNumber": 1.0, "Title": "Sales Forecasting (ARIMA MODEL)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 461.0, "LinesInsertedFromPrevious": 461.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
null
null
null
null
# # Sales forecasting # The project is mainly to predict the future sales by using the time-series forecasting technique. # # Import Dependencies import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats from scipy.stats import pearsonr import itertools from sklearn import preprocessing from statsmodels.tsa.stattools import kpss import statsmodels.api as sm from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error # # Datasets train_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv") test_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv") oil_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv") transaction_df = pd.read_csv( "/kaggle/input/store-sales-time-series-forecasting/transactions.csv" ) stores_df = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/stores.csv") holiday_event_df = pd.read_csv( "/kaggle/input/store-sales-time-series-forecasting/holidays_events.csv" ) # # Understanding the Data train_df.head() test_df.head() oil_df.head() transaction_df.head() stores_df.head() holiday_event_df.head() # The sales column is the target variable. # ## Merging datasets holiday_event_df, stores_df, oil_df & train_df train_df = train_df.merge(stores_df, on="store_nbr") train_df = train_df.merge(oil_df, on="date", how="left") holiday_event_df = holiday_event_df.rename(columns={"type": "holiday_type"}) train_df = train_df.merge(holiday_event_df, on="date", how="left") train_df.head(3) train_df.info() # # Missing Values Detection train_df.isnull().sum() # percentage of missing values in train_df missing_percentages = train_df.isnull().sum() / len(train_df) * 100 print(missing_percentages) # remove columns that are having more than 30% missing values columns_to_delete = missing_percentages[missing_percentages > 30].index train_df = train_df.drop(columns=columns_to_delete) train_df.info() # # Duplicates # train_df.duplicated().any() dupes = train_df.duplicated() # dupes sum(dupes) # dropping duplicate values train_df = train_df.drop_duplicates() train_df train_df.duplicated().any() test_df.duplicated().any() # # Check if there still any missing values present in the train_df # train_df.isnull().sum() # # Calculate count, mean, std, min, 25%, 50%, 75%, max values for each column. Prepare an analysis of the difference between mean and median for each column and possible reasons for the same. train_df.describe() # # EDA train_df.info() # ## Questions # 1. Does the type of stores affect the store sales? # 2. Which family is having the highest sales? # 3. Does promotion able to improve the sales? # 4. Which city is having the most number of customers? # 5. Which state is having the most number of customers? # 6. Which of the stores has the highest sales. # 7. Which month is having the most sales, and least sales. # ### 1. Does the type of stores affect the store sales? # To answer the first question 'Does the type of stores affect the store sales?' , i will use ANOVA test. # ANOVA (Analysis of Variance) is a statistical test used to determine whether there are significant differences between the means of two or more groups. It compares the variation between the groups (due to the different categories or factors) to the variation within the groups. # H0 (>0.05)= The type of stores does not affect store sales. There is no significant difference in store sales between different types of stores. 
# H1 (<0.05)= The type of stores does affect store sales. There is a significant difference in store sales between different types of stores. # grouped_data = train_df.groupby("type")["sales"] # Perform the ANOVA test f_statistic, p_value = stats.f_oneway( *[grouped_data.get_group(type) for type in grouped_data.groups] ) # Print the results print("F-Statistic:", f_statistic) print("p-value:", p_value) # Based on the F-statistics and p-value above, we reject null hypothesis and accept alternative hypothesis. Hence, the type of stores does affect the store sales. There is a significant difference in store sales between different type. # Sales Vs Type plt.scatter(train_df["type"], train_df["sales"]) plt.ylabel("sales") plt.xlabel("type") plt.show() # ### 2. Which family is having the highest sales? # Pie chart # Group the data by family and calculate the total sales for each family family_sales = train_df.groupby("family")["sales"].sum() # Sort the families based on sales in descending order family_sales_sorted = family_sales.sort_values(ascending=False) # Get the top 5 families with the highest sales top_families = family_sales_sorted.head(5) # Create the pie chart plt.pie(top_families, labels=top_families.index, autopct="%1.1f%%", startangle=90) plt.title("Distribution of Sales by Family") plt.axis("equal") plt.show() # Based on the pie chart above, the GROCERY I is having the highest sales, and Baverages comes second highest. # ### 3. Does promotion able to improve the sales? # To answer the 3rd question "Does promotion able to improve the sales?" I will use Pearson correlation test to determine the relationship between the two variables, as both of the variables are numericals. The Pearson correlation coefficient measures the linear relationship between two continuous variables and ranges from -1 to +1. # H0 (>0.05)= The promotion does not affect store sales. # H1 (<0.05)= The promotion does affect store sales. # correlation, p_value = pearsonr(train_df["onpromotion"], train_df["sales"]) print("Pearson correlation coefficient:", correlation) print("p-value:", p_value) # Based on the Pearson correlation coefficient of 0.4279 and the p-value of 0.0, we can reject the null hypothesis (H0) and conclude that there is a significant relationship between promotion and store sales. Therefore, the promotion does affect store sales. # Scatter plot plt.scatter(train_df["onpromotion"], train_df["sales"]) plt.xlabel("Promotion") plt.ylabel("Sales") plt.title("Promotion vs Sales") plt.show() # ### 4. Which city is having the most most number of customers? # Count Plot # Create a count plot plt.figure(figsize=(10, 6)) # Set the figure size sns.countplot(data=train_df, x="city") plt.xlabel("City") plt.ylabel("Count") plt.title("Sales Distribution by City") plt.xticks(rotation=45) plt.show() # Based on the count plot above, the Quito city has the most sales. # ### 5. Which state is having the most number of customers? # # Count Plot # Create a count plot plt.figure(figsize=(10, 6)) # Set the figure size sns.countplot(data=train_df, x="state") plt.xlabel("state") plt.ylabel("Count") plt.title("Sales Distribution by City") plt.xticks(rotation=45) plt.show() # Based on the count plot above, Pichincha state has the most sales as compared to other states. # ### 6. Which of the stores has the highest sales. 
# Calculate the total sales for each store store_sales = train_df.groupby("store_nbr")["sales"].sum().reset_index() # Sort the stores based on sales in descending order store_sales = store_sales.sort_values("sales", ascending=False) # Create a bar plot plt.figure(figsize=(12, 6)) sns.barplot(data=store_sales, x="store_nbr", y="sales") plt.xlabel("Store Number") plt.ylabel("Total Sales") plt.title("Total Sales by Store") plt.xticks(rotation=45) plt.show() # ### 7. Which month is having the most sales, and least sales. # First convert the 'date' from object to date time train_df["date"] = pd.to_datetime(train_df["date"]) # create new columns 'month' 'year' train_df["month"] = train_df["date"].dt.month train_df["year"] = train_df["date"].dt.year train_df.head(7) # Group the data by month, year, and calculate the total sales monthly_sales = train_df.groupby(["month", "year"])["sales"].sum().reset_index() # Create the line chart plt.figure(figsize=(10, 6)) # Set the figure size # Get unique years and cycle through colors years = monthly_sales["year"].unique() colors = itertools.cycle(["red", "green", "blue", "orange", "purple"]) for year in years: year_data = monthly_sales[monthly_sales["year"] == year] plt.plot( year_data["month"], year_data["sales"], marker="o", color=next(colors), label=str(year), ) plt.xlabel("Month") plt.ylabel("Sales") plt.title("Monthly Sales Trend") # Customize x-axis ticks to show month names month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ] plt.xticks(range(1, 13), month_names) plt.legend() plt.show() # Overall, the orange line which is 2016 has a stable high sales since January to Dec. Between the months in 2016, December had the most sales. In other hand, in comparing to other years, 2013 had an overall lowest sales achieved, especially during February. train_df = train_df.groupby("date")["sales", "onpromotion"].sum().reset_index() print(train_df) # # Autocorrelation # Autocorrelation measures the correlation between a time series and its lagged values. Autocorrelation plots (ACF) and partial autocorrelation plots (PACF) help identify significant lag values and potential autoregressive or moving average components. # - If the autocorrelation value is close to 1 or -1, it indicates a strong positive or negative autocorrelation, respectively. # - If the autocorrelation value is close to 0, it indicates a weak or no autocorrelation. sales_series = train_df["sales"] autocorr_values = sales_series.autocorr() print("Autocorrelation:", autocorr_values) # Based on the result above, since the autocorrelation value is close to 1 (0.766), it suggests that there is a positive autocorrelation. A positive autocorrelation indicates that there is a relationship between the current sales values and the previous sales values. from statsmodels.graphics.tsaplots import plot_acf plot_acf(train_df["sales"]) from statsmodels.graphics.tsaplots import plot_pacf import matplotlib.pyplot as plt # Plot the PACF fig, ax = plt.subplots(figsize=(10, 6)) plot_pacf(train_df["sales"], ax=ax) plt.xlabel("Lag") plt.ylabel("Partial Autocorrelation") plt.title("Partial Autocorrelation Function (PACF)") plt.show() # # Stationarity Test # There are various statistical tests to check stationarity, including the Augmented Dickey-Fuller (ADF) test and the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test. 
# ### Augmented Dickey-Fuller (ADF) test # The Augmented Dickey-Fuller (ADF) test is a statistical test used to determine whether a time series is stationary or non-stationary. Stationarity is an important assumption in many time series analysis models. # The ADF test evaluates the null hypothesis that the time series has a unit root, indicating non-stationarity. The alternative hypothesis is that the time series is stationary. # When performing the ADF test, we obtain the ADF statistic and the p-value. The ADF statistic is a negative number and the more negative it is, the stronger the evidence against the null hypothesis. The p-value represents the probability of observing the ADF statistic or a more extreme value if the null hypothesis were true. A low p-value (below a chosen significance level, typically 0.05) indicates strong evidence against the null hypothesis and suggests that the time series is stationary. ts = train_df["sales"] import pandas as pd from statsmodels.tsa.stattools import adfuller # Perform the ADF test result = adfuller(ts) # Extract and print the test statistics and p-value adf_statistic = result[0] p_value = result[1] print("ADF Statistic:", adf_statistic) print("p-value:", p_value) # The ADF statistic of -2.616195748604853 suggests that there is some evidence against the null hypothesis of non-stationarity in the time series. However, the p-value of 0.08969592175787544 indicates that this evidence is not statistically significant at a conventional significance level of 0.05. # In simpler terms, the ADF test indicates that there may be some stationarity in the time series data, but it is not strong enough to conclude with certainty. The p-value suggests that the observed results could occur by chance under the assumption of non-stationarity. Therefore, further analysis and modeling techniques may be necessary to better understand the stationarity of the data. # ### Kwiatkowski-Phillips-Schmidt-Shin (KPSS) # The Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test is another statistical test used to assess the stationarity of a time series. It is complementary to the Augmented Dickey-Fuller (ADF) test. # The KPSS test evaluates the null hypothesis that the time series is stationary against the alternative hypothesis of non-stationarity. Unlike the ADF test, which assumes the presence of a unit root, the KPSS test assumes the absence of a unit root. # The test calculates the KPSS statistic, which measures the cumulative sum of squared deviations from the mean in the series. It also provides a p-value that indicates the probability of observing the KPSS statistic or a more extreme value under the null hypothesis. # Interpreting the results of the KPSS test involves considering the KPSS statistic and the associated p-value. If the KPSS statistic is greater than the critical value at a chosen significance level (e.g., 0.05), it provides evidence against the null hypothesis of stationarity. Conversely, if the KPSS statistic is smaller than the critical value, it suggests that the time series is stationary. result = kpss(ts) # Extract and print the test statistic and p-value kpss_statistic = result[0] p_value = result[1] print("KPSS Statistic:", kpss_statistic) print("p-value:", p_value) # The KPSS test result suggests that the time series is likely non-stationary. The KPSS statistic value of 5.737661236232327 exceeds the critical value, and the p-value of 0.01 is smaller than the significance level of 0.05. 
Therefore, we reject the null hypothesis of stationarity, indicating the presence of a trend or non-constant variance in the time series. # # Autoregressive Integrated Moving Average Model (ARIMA) model p = 2 d = 1 q = 1 train_np = train_df["sales"].values.astype("float64") model = sm.tsa.ARIMA(train_np, order=(p, d, q)) result = model.fit() # Print the model summary print(result.summary()) # Make predictions start_idx = len(train_np) end_idx = len(train_np) + len(test_df) - 1 predictions = result.predict(start=start_idx, end=end_idx) # Print the predictions print(predictions) from statsmodels.tsa.statespace.sarimax import SARIMAX from sklearn.metrics import mean_absolute_error, mean_squared_error y_train = train_df["sales"] X_train = train_df["onpromotion"] # Define and fit the SARIMAX model model = SARIMAX(y_train, exog=X_train, order=(1, 0, 1), seasonal_order=(1, 0, 1, 7)) model_fit = model.fit() # Make predictions on the training data y_pred = model_fit.predict( start=train_df.index[0], end=train_df.index[-1], exog=X_train ) # Compute mean absolute error and mean squared error mae = mean_absolute_error(y_train, y_pred) mse = mean_squared_error(y_train, y_pred) print("MAE:", mae) print("MSE:", mse) # # Submission # submission = pd.DataFrame() submission["id"] = test_df.index submission["sales"] = np.zeros(len(test_df)) # save the submission file as a CSV file submission.to_csv("mysubmission.csv", index=False)
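# Note that the submission above fills the sales column with zeros as a placeholder rather than using the fitted models. As a hedged sketch (not the notebook's actual submission logic), the SARIMAX fit could produce out-of-sample forecasts for a chosen horizon, assuming future values of the exogenous onpromotion series are supplied; here the last observed promotion level is simply carried forward, which is an assumption:
forecast_steps = 16  # hypothetical horizon; set to the length of the competition's test window
future_promo = np.full(forecast_steps, X_train.iloc[-1])  # assumed future promotion levels
sales_forecast = model_fit.forecast(steps=forecast_steps, exog=future_promo)
print(sales_forecast)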
false
0
4,473
5
4,473
4,473
129310768
<jupyter_start><jupyter_text>Fashion Product Images Dataset ### Context Thr growing e-commerce industry presents us with a large dataset waiting to be scraped and researched upon. In addition to professionally shot high resolution product images, we also have multiple label attributes describing the product which was manually entered while cataloging. To add to this, we also have descriptive text that comments on the product characteristics. ### Content Each product is identified by an ID like 42431. You will find a map to all the products in `styles.csv`. From here, you can fetch the image for this product from `images/42431.jpg` and the complete metadata from `styles/42431.json`. To get started easily, we also have exposed some of the key product categories and it's display name in `styles.csv`. If this dataset is too large, you can start with a smaller (280MB) version here: https://www.kaggle.com/paramaggarwal/fashion-product-images-small ### Inspiration So what can you try building? Here are some suggestions: * Start with an image classifier. Use the `masterCategory` column from `styles.csv` and train a convolutional neural network. * The same can be achieved via NLP. Extract the product descriptions from `styles/42431.json` and then run a classifier to get the `masterCategory`. * Try adding more sophisticated classification by predicting the other category labels in `styles.csv` Transfer Learning is your friend and use it wisely. You can even take things much further from here: * Is it possible to build a GAN that takes a category as input and outputs an image? * Auto-encode the image attributes to be able to make a visual search engine that converts the image into a small encoding which is sent to the server to perform visual search? * Visual similarity search? Given an image, suggest other similar images. 
Kaggle dataset identifier: fashion-product-images-dataset <jupyter_script># ignore warnings import warnings warnings.filterwarnings("ignore") import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import cv2 import random import tensorflow as tf # data generator from tensorflow.keras.preprocessing.image import ImageDataGenerator # wandb import wandb from wandb.keras import WandbCallback DATA_PATH = "/kaggle/input/fashion-product-images-dataset/" # # Exploratory and Visualizations # ## Images Dataframe images_df = pd.read_csv(os.path.join(DATA_PATH, "fashion-dataset", "images.csv")) images_df.head(2) # ## Product Meta Data Dataframe styles_df = pd.read_csv( os.path.join(DATA_PATH, "fashion-dataset", "styles.csv"), on_bad_lines="skip" ) styles_df.head(2) # ## Create Unique ID in both Dataframes images_df.head(2) # tạo id để merge với metadata df images_df["id"] = ( images_df["filename"] .apply(lambda filename: filename.replace(".jpg", "")) .astype(int) ) images_df.head(2) # ## Merging the two dataframes data = styles_df.merge(images_df, on="id", how="left") data.head(2) # chuyển filename thành filepath data["filename"] = data["filename"].apply( lambda filename: os.path.join(DATA_PATH, "fashion-dataset", "images", filename) ) data.head(2) # ## Removing Products for which images are not present # lấy danh sách ảnh trong dataset image_files = os.listdir(os.path.join(DATA_PATH, "fashion-dataset", "images")) print(len(image_files)) # các file có trong dataset data["file_found"] = data["id"].apply(lambda idx: "{}.jpg".format(idx) in image_files) data["file_found"].value_counts() # xóa bỏ file không có ảnh trong dataset data = data[data["file_found"]].reset_index(drop=True) print(data.shape) data.head(2) # ## Checking for Missing data data.isnull().sum() # ## Visualizations # trực quan một số hình ảnh trong datasets def dislay_image(image_files): random.shuffle(image_files) for idx, image_file in enumerate(image_files[0:9]): plt.subplot(3, 3, idx + 1) image_path = os.path.join(DATA_PATH, "fashion-dataset", "images", image_file) image_arr = cv2.imread(image_path) image_arr = cv2.cvtColor(image_arr, cv2.COLOR_BGR2RGB) plt.imshow(image_arr) plt.axis("off") dislay_image(image_files) # masterCategory count gr_data_masterCate = data.groupby("masterCategory").size() gr_data_masterCate_sorted = gr_data_masterCate.sort_values() gr_data_masterCate_sorted plt.figure(figsize=(10, 4)) with plt.rc_context({"ytick.color": "darkgrey"}): plt.barh( gr_data_masterCate_sorted.index, gr_data_masterCate_sorted.values, color="lightblue", ) for spine in plt.gca().spines.values(): spine.set_visible(False) plt.ylabel("$CATEGORIES$", size=15, color="darkgrey") plt.xlabel("Number of Image", size=15, color="darkgrey") plt.show() # subCategory count gr_data_subCate = data.groupby("subCategory").size() gr_data_subCate_sorted = gr_data_subCate.sort_values() len(gr_data_subCate_sorted) plt.figure(figsize=(10, 10)) with plt.rc_context({"ytick.color": "darkgrey"}): plt.barh( gr_data_subCate_sorted[-25:].index, gr_data_subCate_sorted[-25:].values, color="lightblue", ) for spine in plt.gca().spines.values(): spine.set_visible(False) plt.ylabel("$CATEGORIES$", size=15, color="darkgrey") plt.xlabel("Number of Image", size=15, color="darkgrey") plt.show() # articleType count gr_data_articleType = data.groupby("articleType").size() gr_data_articleType_sorted = gr_data_articleType.sort_values() plt.figure(figsize=(10, 10)) with plt.rc_context({"ytick.color": "darkgrey"}): plt.barh( 
gr_data_articleType_sorted[-25:].index, gr_data_articleType_sorted[-25:].values, color="lightblue", ) for spine in plt.gca().spines.values(): spine.set_visible(False) plt.ylabel("$CATEGORIES$", size=15, color="darkgrey") plt.xlabel("Number of Image", size=15, color="darkgrey") plt.show() # ## final data # lấy 20 danh mục categoricals = sorted(list(gr_data_subCate_sorted.index[-20:])) data_20 = data[data["subCategory"].isin(categoricals)] data_20 = data_20[["subCategory", "filename"]] data_20 data_20.groupby("subCategory").size().sort_values(ascending=False) # mỗi danh mục lấy tối đa 600 ảnh from sklearn.utils import resample, shuffle from sklearn.model_selection import train_test_split n_samples = 600 lst_df = [] for categorical in categoricals: df_class_tmp = data_20.loc[data_20["subCategory"] == categorical] if df_class_tmp.shape[0] < n_samples: df_resample_tmp = df_class_tmp else: df_resample_tmp = resample(df_class_tmp, n_samples=n_samples, random_state=42) lst_df.append(df_resample_tmp) df = pd.concat(lst_df) cate = df.groupby("subCategory").size().sort_values() # plt.figure(figsize = (10, 10)) with plt.rc_context({"ytick.color": "darkgrey"}): plt.barh(cate[-25:].index, cate[-25:].values, color="lightblue") for spine in plt.gca().spines.values(): spine.set_visible(False) plt.ylabel("$CATEGORIES$", size=15, color="darkgrey") plt.xlabel("Number of Image", size=15, color="darkgrey") plt.show() df.shape df = shuffle(df, random_state=42) df = df.reset_index(drop=True) df.rename({"subCategory": "categorical"}, axis=1, inplace=True) # final data data = df data # # Train-val-test Split from sklearn.model_selection import train_test_split train_df, test_df = train_test_split( data, test_size=0.2, random_state=42, stratify=data["categorical"] ) valid_df, test_df = train_test_split( test_df, test_size=0.5, random_state=42, stratify=test_df["categorical"] ) train_df = train_df.reset_index(drop=True) valid_df = valid_df.reset_index(drop=True) test_df = test_df.reset_index(drop=True) train_df # ## Data Augmentation datagen = ImageDataGenerator( rescale=1 / 255.0, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) train_generator = datagen.flow_from_dataframe( dataframe=train_df, target_size=(224, 224), x_col="filename", y_col="categorical", class_mode="categorical", batch_size=32, shuffle=True, seed=42, ) test_datagen = ImageDataGenerator(rescale=1 / 255.0) valid_generator = test_datagen.flow_from_dataframe( dataframe=valid_df, target_size=(224, 224), x_col="filename", y_col="categorical", class_mode="categorical", batch_size=32, shuffle=True, seed=42, ) test_generator = test_datagen.flow_from_dataframe( dataframe=test_df, x_col="filename", y_col="categorical", target_size=(224, 224), batch_size=32, class_mode="categorical", shuffle=True, seed=42, ) # tmp from keras.models import Sequential from keras.layers import ( Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, ) from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam def build_model(name, weights_path=None): base_model = VGG19(weights="imagenet", include_top=False, input_shape=(224, 224, 3)) for layer in base_model.layers: layer.trainable = False x = base_model.output x = Flatten()(x) x = Dense(4096, activation="leaky_relu")(x) x = BatchNormalization()(x) x = Dropout(0.4)(x) x = Dense(1024, activation="sigmoid")(x) x = BatchNormalization()(x) x = Dropout(0.4)(x) predictions = Dense(20, activation="softmax")(x) model = Model(name=name, 
inputs=base_model.input, outputs=predictions) if weights_path: model.load_weights(weights_path) return model import time NAME = "vgg19-{}".format(int(time.time())) model = build_model(NAME) model.summary() lr = 0.01 epochs = 10 ## Initialize wandb project wandb.init( project="CBIR-fashion product dataset", name=NAME, config={ "learning_rate": lr, "Batch_normalization": True, "Batch_size": 32, "Dropout": "0.4", "architecture": "VGG19", "dataset": "fashion-product-images-dataset", "epochs": epochs, "data generator": True, }, ) wandb_callback = WandbCallback() filepath = "{}_loss_opti.hdf5".format(NAME) checkpoint1 = tf.keras.callbacks.ModelCheckpoint( filepath, monitor="val_loss", verbose=1, save_best_only=True, save_weights_only=False, mode="auto", save_freq="epoch", ) model.compile( loss="categorical_crossentropy", optimizer=Adam(learning_rate=lr), metrics=["accuracy"], ) history = model.fit_generator( train_generator, validation_data=valid_generator, steps_per_epoch=train_generator.n // train_generator.batch_size, validation_steps=valid_generator.n // valid_generator.batch_size, epochs=epochs, callbacks=[checkpoint1, wandb_callback], ) # model.save(filepath) score = model.evaluate_generator(test_generator) print("Test loss:", score[0]) print("Test accuracy:", score[1]) from tensorflow.keras.models import load_model best_model = load_model(filepath) score = best_model.evaluate_generator(test_generator) print("Test loss:", score[0]) print("Test accuracy:", score[1]) IMAGESIZE = 224 CHANNELS = 3 def image_preprocess(image_path): image_orig = cv2.imread(image_path) image_arr = cv2.cvtColor(image_orig, cv2.COLOR_BGR2RGB) image_arr = cv2.resize(image_arr, (IMAGESIZE, IMAGESIZE)) image_arr = image_arr / 255.0 image_arr = image_arr.reshape(-1, IMAGESIZE, IMAGESIZE, CHANNELS) return image_arr test_df.filename[1] anchor_path = ( "/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/50892.jpg" ) pos_path = ( "/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/45986.jpg" ) neg_path = ( "/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/29863.jpg" ) image_arr1 = image_preprocess(anchor_path) image_arr2 = image_preprocess(pos_path) image_arr3 = image_preprocess(neg_path) image_lst = [image_arr1, image_arr2, image_arr3] for i, image_arr in enumerate(image_lst): plt.subplot(1, 3, i + 1) plt.imshow(image_arr[0]) plt.axis(False) plt.show() y_hat = model.predict(image_arr) y_hat.argmax() categoricals[14] best_model.layers CBIR_model = Model(inputs=best_model.input, outputs=best_model.layers[-4].output) prehashcode1 = CBIR_model.predict(image_arr1) prehashcode2 = CBIR_model.predict(image_arr2) prehashcode3 = CBIR_model.predict(image_arr3) prehashcode1.shape hashcode1 = np.where(prehashcode1 < 0.5, 0, 1) hashcode2 = np.where(prehashcode2 < 0.5, 0, 1) hashcode3 = np.where(prehashcode3 < 0.5, 0, 1) hashcode1 = hashcode1.astype("bool") hashcode2 = hashcode2.astype("bool") hashcode3 = hashcode3.astype("bool") hamming_dist = np.count_nonzero(hashcode1 != hashcode2) hamming_dist hamming_dist = np.count_nonzero(hashcode1 != hashcode3) hamming_dist hamming_dist = np.count_nonzero(hashcode2 != hashcode3) hamming_dist
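# The pairwise Hamming distances above generalize naturally to retrieval: hash every gallery image once, then rank the gallery by Hamming distance to the query hash. A minimal sketch reusing CBIR_model and image_preprocess from above; gallery_paths is a hypothetical list of image paths (for example, a sample of test_df["filename"]):
def to_hashcode(image_path):
    # binary hash from the penultimate dense-layer activations
    activations = CBIR_model.predict(image_preprocess(image_path), verbose=0)
    return (activations >= 0.5).astype(bool).ravel()

def retrieve_similar(query_path, gallery_paths, top_k=5):
    query_hash = to_hashcode(query_path)
    distances = [
        (path, int(np.count_nonzero(query_hash != to_hashcode(path))))
        for path in gallery_paths
    ]
    # smaller Hamming distance means more similar
    return sorted(distances, key=lambda pair: pair[1])[:top_k]

# Example call with a hypothetical gallery of 100 test images:
# results = retrieve_similar(anchor_path, list(test_df["filename"].sample(100, random_state=0)))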
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/310/129310768.ipynb
fashion-product-images-dataset
paramaggarwal
[{"Id": 129310768, "ScriptId": 38111925, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11502381, "CreationDate": "05/12/2023 16:45:42", "VersionNumber": 1.0, "Title": "CBIR_using_VGG_and_hash", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 411.0, "LinesInsertedFromPrevious": 411.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185238363, "KernelVersionId": 129310768, "SourceDatasetVersionId": 329006}]
[{"Id": 329006, "DatasetId": 139630, "DatasourceVersionId": 342521, "CreatorUserId": 938019, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "03/14/2019 18:57:43", "VersionNumber": 1.0, "Title": "Fashion Product Images Dataset", "Slug": "fashion-product-images-dataset", "Subtitle": "44k products with multiple category labels, descriptions and high-res images.", "Description": "### Context\n\nThr growing e-commerce industry presents us with a large dataset waiting to be scraped and researched upon. In addition to professionally shot high resolution product images, we also have multiple label attributes describing the product which was manually entered while cataloging. To add to this, we also have descriptive text that comments on the product characteristics.\n\n### Content\n\nEach product is identified by an ID like 42431. You will find a map to all the products in `styles.csv`. From here, you can fetch the image for this product from `images/42431.jpg` and the complete metadata from `styles/42431.json`.\n\nTo get started easily, we also have exposed some of the key product categories and it's display name in `styles.csv`.\n \nIf this dataset is too large, you can start with a smaller (280MB) version here:\nhttps://www.kaggle.com/paramaggarwal/fashion-product-images-small\n\n### Inspiration\n\nSo what can you try building? Here are some suggestions:\n\n* Start with an image classifier. Use the `masterCategory` column from `styles.csv` and train a convolutional neural network.\n* The same can be achieved via NLP. Extract the product descriptions from `styles/42431.json` and then run a classifier to get the `masterCategory`.\n* Try adding more sophisticated classification by predicting the other category labels in `styles.csv`\n\nTransfer Learning is your friend and use it wisely. You can even take things much further from here:\n\n* Is it possible to build a GAN that takes a category as input and outputs an image?\n* Auto-encode the image attributes to be able to make a visual search engine that converts the image into a small encoding which is sent to the server to perform visual search?\n* Visual similarity search? Given an image, suggest other similar images.", "VersionNotes": "Initial release", "TotalCompressedBytes": 12369096246.0, "TotalUncompressedBytes": 12369096246.0}]
[{"Id": 139630, "CreatorUserId": 938019, "OwnerUserId": 938019.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 329006.0, "CurrentDatasourceVersionId": 342521.0, "ForumId": 149882, "Type": 2, "CreationDate": "03/14/2019 18:57:43", "LastActivityDate": "03/14/2019", "TotalViews": 213875, "TotalDownloads": 23461, "TotalVotes": 484, "TotalKernels": 96}]
[{"Id": 938019, "UserName": "paramaggarwal", "DisplayName": "Param Aggarwal", "RegisterDate": "03/02/2017", "PerformanceTier": 0}]
false
0
3,778
0
4,236
3,778
129310337
import pandas as pd import numpy as np # Load a dataset into a Pandas DataFrame train_proteins = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv" ) train_peptides = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv" ) train_clinical = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv" ) train_clinical.shape train_clinical.head() train_clinical.describe()
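# A quick next step after describing the clinical table is to check the size and completeness of all three tables, not just the clinical one. A minimal sketch using only the frames loaded above:
for name, frame in [
    ("proteins", train_proteins),
    ("peptides", train_peptides),
    ("clinical", train_clinical),
]:
    print(f"{name}: {frame.shape[0]} rows, {frame.shape[1]} columns")
    print(frame.isnull().sum().sort_values(ascending=False).head(), "\n")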
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/310/129310337.ipynb
null
null
[{"Id": 129310337, "ScriptId": 38443859, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14946652, "CreationDate": "05/12/2023 16:41:03", "VersionNumber": 1.0, "Title": "GMnb-AMP-Parkinson-Progression-Competition", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 17.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
176
0
176
176
129284284
<jupyter_start><jupyter_text>Credit Card Fraud Detection Context --------- It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase. Content --------- The dataset contains transactions made by credit cards in September 2013 by European cardholders. This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions. It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise. Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification. Update (03/05/2021) --------- A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book. Acknowledgements --------- The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project Please cite the following works: Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015 Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. [Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE Dal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi) Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. 
[Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019 Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019 Yann-Aël Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook) Bertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Oblé, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics [1]: https://www.researchgate.net/project/Fraud-detection-5 [2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/ [3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification [4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective [5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy [6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf [7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark [8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection Kaggle dataset identifier: creditcardfraud <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.covariance import EllipticEnvelope from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler, RobustScaler from sklearn.model_selection import cross_val_score, ShuffleSplit, cross_val_predict from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KernelDensity pd.set_option("display.precision", 2) scores = ["precision", "recall"] def 
print_dataframe(filtered_cv_results): """Pretty print for filtered dataframe""" for mean_precision, std_precision, mean_recall, std_recall, params in zip( filtered_cv_results["mean_test_precision"], filtered_cv_results["std_test_precision"], filtered_cv_results["mean_test_recall"], filtered_cv_results["std_test_recall"], filtered_cv_results["params"], ): print( f"precision: {mean_precision:0.3f} (±{std_precision:0.03f})," f" recall: {mean_recall:0.3f} (±{std_recall:0.03f})," f" for {params}" ) print() def refit_strategy(cv_results): """Define the strategy to select the best estimator. The strategy defined here is to filter-out all results below a precision threshold of 0.96, rank the remaining by recall and keep all models with one standard deviation of the best by recall. Once these models are selected, we can select the fastest model to predict. Parameters ---------- cv_results : dict of numpy (masked) ndarrays CV results as returned by the `GridSearchCV`. Returns ------- best_index : int The index of the best estimator as it appears in `cv_results`. """ scores = ["precision", "recall"] # print the info about the grid-search for the different scores precision_threshold = 0.96 cv_results_ = pd.DataFrame(cv_results) print("All grid-search results:") print_dataframe(cv_results_) # Filter-out all results below the threshold high_precision_cv_results = cv_results_[ cv_results_["mean_test_precision"] > precision_threshold ] print(f"Models with a precision higher than {precision_threshold}:") print_dataframe(high_precision_cv_results) high_precision_cv_results = high_precision_cv_results[ [ "mean_score_time", "mean_test_recall", "std_test_recall", "mean_test_precision", "std_test_precision", "rank_test_recall", "rank_test_precision", "params", ] ] # Select the most performant models in terms of recall # (within 1 sigma from the best) best_recall_std = high_precision_cv_results["mean_test_recall"].std() best_recall = high_precision_cv_results["mean_test_recall"].max() best_recall_threshold = best_recall - best_recall_std high_recall_cv_results = high_precision_cv_results[ high_precision_cv_results["mean_test_recall"] > best_recall_threshold ] print( "Out of the previously selected high precision models, we keep all the\n" "the models within one standard deviation of the highest recall model:" ) print_dataframe(high_recall_cv_results) # From the best candidates, select the fastest model to predict fastest_top_recall_high_precision_index = high_recall_cv_results[ "mean_score_time" ].idxmin() print( "\nThe selected final model is the fastest to predict out of the previously\n" "selected subset of best models based on precision and recall.\n" "Its scoring time is:\n\n" f"{high_recall_cv_results.loc[fastest_top_recall_high_precision_index]}" ) return fastest_top_recall_high_precision_index import warnings warnings.filterwarnings("ignore") df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv") df.head() print("Dataset size - ", df.shape) df = df.drop(columns="Time") print("Duplicates in the data set - ", df.duplicated().sum()) df = df.drop_duplicates() print("Dataset Final Size - ", df.shape) df.info() class_freq = df["Class"].value_counts() print("Overview of distribution of Fraud and non-fraud entries in Data.") print(class_freq) print("% Fraud cases in the dataset - ", class_freq[1] / class_freq[0]) df.describe(percentiles=[0.01, 0.99]).T feature_selector = SelectKBest(mutual_info_classif) feature_selected = feature_selector.fit_transform(df.drop(columns="Class"), df["Class"]) features = 
feature_selector.get_feature_names_out() sns.pairplot(data=df[features]) df = df.sample(frac=1, random_state=38) frauds = df.loc[df["Class"] == 1] no_frauds = df.loc[df["Class"] == 0][: frauds.shape[0]] norm_distributed_df = pd.concat([frauds, no_frauds], axis=0) new_df = norm_distributed_df.sample(frac=1, random_state=38) new_df = new_df.drop_duplicates() new_df.shape X = new_df.loc[:, features] outlier_detector = EllipticEnvelope(contamination=0.05) outlier_detector.fit(X) indices = outlier_detector.predict(X) bool_indices = np.where(indices == 1, True, False) X = new_df.loc[bool_indices, features] y = new_df.loc[bool_indices, "Class"] print("After Removing the outliers we have %s data instances", X.shape) # # Clustering km = KMeans(n_clusters=2, n_init=10, random_state=39).fit(X) cluster = km.predict(X) print(classification_report(cluster, y)) tc = km.fit_transform(X) sns.scatterplot(data=tc, x=tc[:, 0], y=tc[:, 1], hue=y) # # Model Building ss = StandardScaler() X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39) X_train = ss.fit_transform(X_train) X_test = ss.transform(X_test) # ## Undersample - SVC tuned_parameters = [ {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid_search = GridSearchCV( SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) undersample_svc_model = grid_search.best_estimator_ # ## Undersample - RFC tuned_parameters = [{"max_depth": [4, 6, 8]}] grid_search = GridSearchCV( RandomForestClassifier(max_samples=0.8, random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy, ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) undersample_rfc = grid_search.best_estimator_ # # OverSampling params = {"bandwidth": np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(df.loc[df["Class"] == 1]) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ latest_data = kde.sample(1000, random_state=10) latest_df = pd.DataFrame(latest_data, columns=df.columns) latest_df["Class"] = 1 latest_df = latest_df[latest_df["Amount"] > 0] oversample = pd.concat([latest_df, df], axis=0) oversample = oversample.sample(frac=1, random_state=38) frauds = oversample.loc[oversample["Class"] == 1] no_frauds = oversample.loc[oversample["Class"] == 0][: frauds.shape[0]] norm_distributed_df = pd.concat([frauds, no_frauds], axis=0) oversample_new_df = norm_distributed_df.sample(frac=1, random_state=38) feature_selector = SelectKBest(mutual_info_classif) feature_selected = feature_selector.fit_transform( oversample_new_df.drop(columns="Class"), oversample_new_df["Class"] ) features = feature_selector.get_feature_names_out() X = oversample_new_df.loc[:, features] outlier_detector = EllipticEnvelope(contamination=0.1) outlier_detector.fit(X) indices = outlier_detector.predict(X) bool_indices = np.where(indices == 1, True, False) X = X.loc[bool_indices] y = oversample_new_df.loc[bool_indices, "Class"] # # Model Building X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39) ss1 = StandardScaler() X_train = ss1.fit_transform(X_train) X_test = ss1.transform(X_test) # ## SVC tuned_parameters = [ {"kernel": 
["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid_search = GridSearchCV( SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) oversample_svc_model = grid_search.best_estimator_ # ## RFC tuned_parameters = [{"max_depth": [4, 6, 8]}] grid_search = GridSearchCV( RandomForestClassifier(max_samples=0.8, random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy, ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) oversample_rfc = grid_search.best_estimator_
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284284.ipynb
creditcardfraud
null
[{"Id": 129284284, "ScriptId": 38357055, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3687805, "CreationDate": "05/12/2023 12:48:59", "VersionNumber": 1.0, "Title": "Credit Card fraud detection", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 327.0, "LinesInsertedFromPrevious": 327.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185187993, "KernelVersionId": 129284284, "SourceDatasetVersionId": 23498}]
[{"Id": 23498, "DatasetId": 310, "DatasourceVersionId": 23502, "CreatorUserId": 998023, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "03/23/2018 01:17:27", "VersionNumber": 3.0, "Title": "Credit Card Fraud Detection", "Slug": "creditcardfraud", "Subtitle": "Anonymized credit card transactions labeled as fraudulent or genuine", "Description": "Context\n---------\n\nIt is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.\n\nContent\n---------\n\nThe dataset contains transactions made by credit cards in September 2013 by European cardholders. \nThis dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.\n\nIt contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise. \n\nGiven the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.\n\nUpdate (03/05/2021)\n---------\n\nA simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.\n\nAcknowledgements\n---------\n\nThe dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00e9 Libre de Bruxelles) on big data mining and fraud detection.\nMore details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project\n\nPlease cite the following works: \n\nAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n\nDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon\n\nDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. 
[Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n\nDal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)\n\nCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier\n\nCarcillo, Fabrizio; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n\nBertrand Lebichot, Yann-A\u00ebl Le Borgne, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n\nFabrizio Carcillo, Yann-A\u00ebl Le Borgne, Olivier Caelen, Frederic Obl\u00e9, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019\n\nYann-A\u00ebl Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook) \n\nBertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics\n\n [1]: https://www.researchgate.net/project/Fraud-detection-5\n [2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/\n [3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification\n [4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective\n [5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy\n [6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf\n [7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark\n \n[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection", "VersionNotes": "Fixed preview", "TotalCompressedBytes": 150828752.0, "TotalUncompressedBytes": 69155632.0}]
[{"Id": 310, "CreatorUserId": 14069, "OwnerUserId": NaN, "OwnerOrganizationId": 1160.0, "CurrentDatasetVersionId": 23498.0, "CurrentDatasourceVersionId": 23502.0, "ForumId": 1838, "Type": 2, "CreationDate": "11/03/2016 13:21:36", "LastActivityDate": "02/06/2018", "TotalViews": 10310781, "TotalDownloads": 564249, "TotalVotes": 10432, "TotalKernels": 4266}]
null
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.covariance import EllipticEnvelope from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler, RobustScaler from sklearn.model_selection import cross_val_score, ShuffleSplit, cross_val_predict from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KernelDensity pd.set_option("display.precision", 2) scores = ["precision", "recall"] def print_dataframe(filtered_cv_results): """Pretty print for filtered dataframe""" for mean_precision, std_precision, mean_recall, std_recall, params in zip( filtered_cv_results["mean_test_precision"], filtered_cv_results["std_test_precision"], filtered_cv_results["mean_test_recall"], filtered_cv_results["std_test_recall"], filtered_cv_results["params"], ): print( f"precision: {mean_precision:0.3f} (±{std_precision:0.03f})," f" recall: {mean_recall:0.3f} (±{std_recall:0.03f})," f" for {params}" ) print() def refit_strategy(cv_results): """Define the strategy to select the best estimator. The strategy defined here is to filter-out all results below a precision threshold of 0.96, rank the remaining by recall and keep all models with one standard deviation of the best by recall. Once these models are selected, we can select the fastest model to predict. Parameters ---------- cv_results : dict of numpy (masked) ndarrays CV results as returned by the `GridSearchCV`. Returns ------- best_index : int The index of the best estimator as it appears in `cv_results`. 
""" scores = ["precision", "recall"] # print the info about the grid-search for the different scores precision_threshold = 0.96 cv_results_ = pd.DataFrame(cv_results) print("All grid-search results:") print_dataframe(cv_results_) # Filter-out all results below the threshold high_precision_cv_results = cv_results_[ cv_results_["mean_test_precision"] > precision_threshold ] print(f"Models with a precision higher than {precision_threshold}:") print_dataframe(high_precision_cv_results) high_precision_cv_results = high_precision_cv_results[ [ "mean_score_time", "mean_test_recall", "std_test_recall", "mean_test_precision", "std_test_precision", "rank_test_recall", "rank_test_precision", "params", ] ] # Select the most performant models in terms of recall # (within 1 sigma from the best) best_recall_std = high_precision_cv_results["mean_test_recall"].std() best_recall = high_precision_cv_results["mean_test_recall"].max() best_recall_threshold = best_recall - best_recall_std high_recall_cv_results = high_precision_cv_results[ high_precision_cv_results["mean_test_recall"] > best_recall_threshold ] print( "Out of the previously selected high precision models, we keep all the\n" "the models within one standard deviation of the highest recall model:" ) print_dataframe(high_recall_cv_results) # From the best candidates, select the fastest model to predict fastest_top_recall_high_precision_index = high_recall_cv_results[ "mean_score_time" ].idxmin() print( "\nThe selected final model is the fastest to predict out of the previously\n" "selected subset of best models based on precision and recall.\n" "Its scoring time is:\n\n" f"{high_recall_cv_results.loc[fastest_top_recall_high_precision_index]}" ) return fastest_top_recall_high_precision_index import warnings warnings.filterwarnings("ignore") df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv") df.head() print("Dataset size - ", df.shape) df = df.drop(columns="Time") print("Duplicates in the data set - ", df.duplicated().sum()) df = df.drop_duplicates() print("Dataset Final Size - ", df.shape) df.info() class_freq = df["Class"].value_counts() print("Overview of distribution of Fraud and non-fraud entries in Data.") print(class_freq) print("% Fraud cases in the dataset - ", class_freq[1] / class_freq[0]) df.describe(percentiles=[0.01, 0.99]).T feature_selector = SelectKBest(mutual_info_classif) feature_selected = feature_selector.fit_transform(df.drop(columns="Class"), df["Class"]) features = feature_selector.get_feature_names_out() sns.pairplot(data=df[features]) df = df.sample(frac=1, random_state=38) frauds = df.loc[df["Class"] == 1] no_frauds = df.loc[df["Class"] == 0][: frauds.shape[0]] norm_distributed_df = pd.concat([frauds, no_frauds], axis=0) new_df = norm_distributed_df.sample(frac=1, random_state=38) new_df = new_df.drop_duplicates() new_df.shape X = new_df.loc[:, features] outlier_detector = EllipticEnvelope(contamination=0.05) outlier_detector.fit(X) indices = outlier_detector.predict(X) bool_indices = np.where(indices == 1, True, False) X = new_df.loc[bool_indices, features] y = new_df.loc[bool_indices, "Class"] print("After Removing the outliers we have %s data instances", X.shape) # # Clustering km = KMeans(n_clusters=2, n_init=10, random_state=39).fit(X) cluster = km.predict(X) print(classification_report(cluster, y)) tc = km.fit_transform(X) sns.scatterplot(data=tc, x=tc[:, 0], y=tc[:, 1], hue=y) # # Model Building ss = StandardScaler() X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39) X_train = 
ss.fit_transform(X_train) X_test = ss.transform(X_test) # ## Undersample - SVC tuned_parameters = [ {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid_search = GridSearchCV( SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) undersample_svc_model = grid_search.best_estimator_ # ## Undersample - RFC tuned_parameters = [{"max_depth": [4, 6, 8]}] grid_search = GridSearchCV( RandomForestClassifier(max_samples=0.8, random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy, ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) undersample_rfc = grid_search.best_estimator_ # # OverSampling params = {"bandwidth": np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(df.loc[df["Class"] == 1]) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ latest_data = kde.sample(1000, random_state=10) latest_df = pd.DataFrame(latest_data, columns=df.columns) latest_df["Class"] = 1 latest_df = latest_df[latest_df["Amount"] > 0] oversample = pd.concat([latest_df, df], axis=0) oversample = oversample.sample(frac=1, random_state=38) frauds = oversample.loc[oversample["Class"] == 1] no_frauds = oversample.loc[oversample["Class"] == 0][: frauds.shape[0]] norm_distributed_df = pd.concat([frauds, no_frauds], axis=0) oversample_new_df = norm_distributed_df.sample(frac=1, random_state=38) feature_selector = SelectKBest(mutual_info_classif) feature_selected = feature_selector.fit_transform( oversample_new_df.drop(columns="Class"), oversample_new_df["Class"] ) features = feature_selector.get_feature_names_out() X = oversample_new_df.loc[:, features] outlier_detector = EllipticEnvelope(contamination=0.1) outlier_detector.fit(X) indices = outlier_detector.predict(X) bool_indices = np.where(indices == 1, True, False) X = X.loc[bool_indices] y = oversample_new_df.loc[bool_indices, "Class"] # # Model Building X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=39) ss1 = StandardScaler() X_train = ss1.fit_transform(X_train) X_test = ss1.transform(X_test) # ## SVC tuned_parameters = [ {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid_search = GridSearchCV( SVC(random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) oversample_svc_model = grid_search.best_estimator_ # ## RFC tuned_parameters = [{"max_depth": [4, 6, 8]}] grid_search = GridSearchCV( RandomForestClassifier(max_samples=0.8, random_state=53), tuned_parameters, scoring=scores, refit=refit_strategy, ) grid_search.fit(X_train, y_train) grid_search.best_params_ y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) oversample_rfc = grid_search.best_estimator_
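# The refit_strategy above describes a three-step selection: keep only candidates whose mean
# precision clears 0.96, keep those within one standard deviation of the best recall, then
# pick the fastest scorer. A small self-contained sketch of that same logic on a made-up
# cv_results table (column names follow scikit-learn's GridSearchCV conventions; all numbers
# here are illustrative, not results from the notebook):
import pandas as pd

toy_cv_results = pd.DataFrame(
    {
        "mean_test_precision": [0.95, 0.97, 0.98, 0.99],
        "mean_test_recall": [0.90, 0.88, 0.86, 0.80],
        "mean_score_time": [0.20, 0.05, 0.10, 0.02],
        "params": [{"C": 1}, {"C": 10}, {"C": 100}, {"C": 1000}],
    }
)

precision_threshold = 0.96
high_precision = toy_cv_results[toy_cv_results["mean_test_precision"] > precision_threshold]

# keep candidates within one standard deviation of the best recall
best_recall = high_precision["mean_test_recall"].max()
recall_std = high_precision["mean_test_recall"].std()
candidates = high_precision[high_precision["mean_test_recall"] > best_recall - recall_std]

# of the remaining candidates, the fastest model to score is the one that gets refit
best_index = candidates["mean_score_time"].idxmin()
print(toy_cv_results.loc[best_index, "params"])  # -> {'C': 10} for these made-up numbers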
false
0
2,971
0
4,845
2,971
129284945
<jupyter_start><jupyter_text>german-traffic-signs ### Context Detecting Street Signs is one of the most important tasks in Self Driving Cars.This dataset is a benchmark in Street signs and symbols including 43 different classes. Classifying road symbols using Deep Convolutional Neural Network is the aim of this dataset. Kaggle dataset identifier: germantrafficsigns <jupyter_script>import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # importing pickle module to unpickle the files in the dataset import pickle with open("/kaggle/input/germantrafficsigns/train.p", "rb") as f: train_data = pickle.load(f) with open("/kaggle/input/germantrafficsigns/test.p", "rb") as f: test_data = pickle.load(f) with open("/kaggle/input/germantrafficsigns/valid.p", "rb") as f: val_data = pickle.load(f) # splitting the data into our variables x_train, y_train = train_data["features"], train_data["labels"] x_test, y_test = test_data["features"], test_data["labels"] x_val, y_val = val_data["features"], val_data["labels"] # showing the size of each variable we have print(x_train.shape) print(x_test.shape) print(x_val.shape) import pandas as pd data = pd.read_csv("/kaggle/input/germantrafficsigns/signnames.csv") print(data) # from the dataframe printed below we come to know that the dataset has 43 classes. # import matplotlib for visualizing images import matplotlib.pyplot as plt # the block of code below just display an image from our data plt.imshow(x_train[0]) print(x_train[0].shape) # converting images into gray scale so that the neural network can learn the pattern easily import cv2 def gray(img): img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return img # equalizing images to make the features in the images more porminent for the model to understand def equalize(img): img = cv2.equalizeHist(img) return img def preprocessing(img): img = gray(img) img = equalize(img) # now normalizing the images img = img / 255 return img # using map fucntion to iterate through the whole dataset and apply our preprocessing fucntion to every image import numpy as np x_train = np.array(list(map(preprocessing, x_train))) x_val = np.array(list(map(preprocessing, x_val))) x_test = np.array(list(map(preprocessing, x_test))) # showing the new preprocessed images plt.imshow(x_train[0]) print(x_train[0].shape) # converting the labels into categorical variables from keras.utils.np_utils import to_categorical y_cat_train = to_categorical(y_train, 43) y_cat_test = to_categorical(y_test, 43) y_cat_val = to_categorical(y_val, 43) # reshaping the images x_train = x_train.reshape(34799, 32, 32, 1) x_test = x_test.reshape(12630, 32, 32, 1) x_val = x_val.reshape(4410, 32, 32, 1) print(x_train.shape) # importing keras and required layers to create the model import keras from keras.models import Sequential from keras.optimizers import Adam from keras.layers import Dense from keras.layers import Flatten, Dropout from keras.layers.convolutional import Conv2D, MaxPooling2D # create model model = Sequential() model.add(Conv2D(30, (5, 5), input_shape=(32, 32, 1), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(15, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(500, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(43, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) print(model.summary()) model.fit(x_train, y_cat_train, 
epochs=20, batch_size=400, verbose=1, shuffle=1) from sklearn.metrics import classification_report prediction = model.predict_classes(x_test) print(classification_report(y_test, prediction)) model.save("street_signs.h5")
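# A note on the last cell above: Sequential.predict_classes was removed in newer TensorFlow
# releases (2.6 and later), so on a current environment that call fails. A minimal sketch of
# the equivalent prediction step, assuming the model, x_test and y_test defined in the script above:
import numpy as np
from sklearn.metrics import classification_report

probs = model.predict(x_test)          # softmax outputs, shape (n_samples, 43)
prediction = np.argmax(probs, axis=1)  # most probable class index per sample
print(classification_report(y_test, prediction))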
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284945.ipynb
germantrafficsigns
saadhaxxan
[{"Id": 129284945, "ScriptId": 16489704, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7209429, "CreationDate": "05/12/2023 12:53:50", "VersionNumber": 1.0, "Title": "Street-Signs-and-boards-classification-using-DCNN", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 135.0, "LinesInsertedFromPrevious": 135.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185189021, "KernelVersionId": 129284945, "SourceDatasetVersionId": 976937}]
[{"Id": 976937, "DatasetId": 533907, "DatasourceVersionId": 1005266, "CreatorUserId": 2667524, "LicenseName": "CC0: Public Domain", "CreationDate": "02/29/2020 09:05:32", "VersionNumber": 1.0, "Title": "german-traffic-signs", "Slug": "germantrafficsigns", "Subtitle": "Traffic Signs Dataset for Classification", "Description": "### Context\n\nDetecting Street Signs is one of the most important tasks in Self Driving Cars.This dataset is a benchmark in Street signs and symbols including 43 different classes. Classifying road symbols using Deep Convolutional Neural Network is the aim of this dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 533907, "CreatorUserId": 2667524, "OwnerUserId": 2667524.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 976937.0, "CurrentDatasourceVersionId": 1005266.0, "ForumId": 547357, "Type": 2, "CreationDate": "02/29/2020 09:05:32", "LastActivityDate": "02/29/2020", "TotalViews": 6704, "TotalDownloads": 672, "TotalVotes": 7, "TotalKernels": 4}]
[{"Id": 2667524, "UserName": "saadhaxxan", "DisplayName": "Saad Hassan", "RegisterDate": "01/03/2019", "PerformanceTier": 1}]
import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # importing pickle module to unpickle the files in the dataset import pickle with open("/kaggle/input/germantrafficsigns/train.p", "rb") as f: train_data = pickle.load(f) with open("/kaggle/input/germantrafficsigns/test.p", "rb") as f: test_data = pickle.load(f) with open("/kaggle/input/germantrafficsigns/valid.p", "rb") as f: val_data = pickle.load(f) # splitting the data into our variables x_train, y_train = train_data["features"], train_data["labels"] x_test, y_test = test_data["features"], test_data["labels"] x_val, y_val = val_data["features"], val_data["labels"] # showing the size of each variable we have print(x_train.shape) print(x_test.shape) print(x_val.shape) import pandas as pd data = pd.read_csv("/kaggle/input/germantrafficsigns/signnames.csv") print(data) # from the dataframe printed below we come to know that the dataset has 43 classes. # import matplotlib for visualizing images import matplotlib.pyplot as plt # the block of code below just display an image from our data plt.imshow(x_train[0]) print(x_train[0].shape) # converting images into gray scale so that the neural network can learn the pattern easily import cv2 def gray(img): img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return img # equalizing images to make the features in the images more porminent for the model to understand def equalize(img): img = cv2.equalizeHist(img) return img def preprocessing(img): img = gray(img) img = equalize(img) # now normalizing the images img = img / 255 return img # using map fucntion to iterate through the whole dataset and apply our preprocessing fucntion to every image import numpy as np x_train = np.array(list(map(preprocessing, x_train))) x_val = np.array(list(map(preprocessing, x_val))) x_test = np.array(list(map(preprocessing, x_test))) # showing the new preprocessed images plt.imshow(x_train[0]) print(x_train[0].shape) # converting the labels into categorical variables from keras.utils.np_utils import to_categorical y_cat_train = to_categorical(y_train, 43) y_cat_test = to_categorical(y_test, 43) y_cat_val = to_categorical(y_val, 43) # reshaping the images x_train = x_train.reshape(34799, 32, 32, 1) x_test = x_test.reshape(12630, 32, 32, 1) x_val = x_val.reshape(4410, 32, 32, 1) print(x_train.shape) # importing keras and required layers to create the model import keras from keras.models import Sequential from keras.optimizers import Adam from keras.layers import Dense from keras.layers import Flatten, Dropout from keras.layers.convolutional import Conv2D, MaxPooling2D # create model model = Sequential() model.add(Conv2D(30, (5, 5), input_shape=(32, 32, 1), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(15, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(500, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(43, activation="softmax")) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) print(model.summary()) model.fit(x_train, y_cat_train, epochs=20, batch_size=400, verbose=1, shuffle=1) from sklearn.metrics import classification_report prediction = model.predict_classes(x_test) print(classification_report(y_test, prediction)) model.save("street_signs.h5")
false
1
1,151
0
1,237
1,151
129284646
<jupyter_start><jupyter_text>Food and their calories ### Context The data set consist of food such as soup,ice-cream,pizza,vegetables,fruits etc, the serving for which the calories are calculated. ### Content There are three columns for this dataset: Food Serving Calories ### Inspiration We would always wanted to prepare the diet chart based in the calories. This the dataset then.😄 Kaggle dataset identifier: food-and-their-calories <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import random import re def extract_numeric_calories(calories_string): numeric_calories = re.findall(r"\d+", calories_string) if numeric_calories: return int(numeric_calories[0]) else: return 0 def suggest_foods(predicted_calories, food_dataset, num_suggestions=5): predicted_calories_numeric = extract_numeric_calories(predicted_calories) filtered_foods = food_dataset[ food_dataset["Calories"].apply(extract_numeric_calories) == predicted_calories_numeric ] if len(filtered_foods) >= num_suggestions: suggestions = random.sample(list(filtered_foods["Food"]), num_suggestions) return suggestions elif len(filtered_foods) > 0: suggestions = list(filtered_foods["Food"]) return suggestions else: return [] predicted_calories = "60 cals" food_dataset = pd.read_csv( "/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv" ) suggested_foods = suggest_foods(predicted_calories, food_dataset) if suggested_foods: print("Suggested foods for", predicted_calories, "calories:") for food in suggested_foods: print(food) else: print("No food options available for the specified calorie value.") import pandas as pd import re from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import MinMaxScaler from sklearn.neighbors import NearestNeighbors food_df = pd.read_csv( "/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv" ) food_df["Calories"] = ( food_df["Calories"].apply(lambda x: re.findall(r"\d+", x)[0]).astype(float) ) scaler = MinMaxScaler() food_df["Calories"] = scaler.fit_transform(food_df["Calories"].values.reshape(-1, 1)) predicted_calories = 4 filtered_foods = food_df[ (food_df["Calories"] >= predicted_calories) & (food_df["Calories"] <= predicted_calories) ] if len(filtered_foods) > 0: food_vectors = filtered_foods.drop(["Food", "Calories"], axis=1) cosine_sim = cosine_similarity(food_vectors) k = 5 nn = NearestNeighbors(n_neighbors=k, metric="precomputed") nn.fit(cosine_sim) top_food_indices = nn.kneighbors([cosine_sim[0]], return_distance=False) recommended_foods = filtered_foods.iloc[top_food_indices[0]] for _, food in recommended_foods.iterrows(): print(f"Food: {food['Food']}") print(f"Calories: {food['Calories']}") print() else: print("No foods found within the desired calorie range.")
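# One caveat about the second snippet above: after MinMaxScaler squeezes Calories into [0, 1],
# the filter (Calories >= 4) & (Calories <= 4) is an exact-equality test against a value the
# scaled column can never reach, so it always falls through to "No foods found". A minimal
# sketch of a tolerance-based filter on the raw calorie numbers instead (the function name,
# tolerance and sample rows are illustrative assumptions, not part of the notebook):
import pandas as pd

def foods_near(food_df: pd.DataFrame, target_calories: float, tolerance: float = 25.0) -> pd.DataFrame:
    """Return foods whose calorie count lies within +/- tolerance of the target."""
    mask = (food_df["Calories"] - target_calories).abs() <= tolerance
    return food_df.loc[mask, ["Food", "Calories"]]

sample = pd.DataFrame({"Food": ["Apple", "Pizza", "Soup"], "Calories": [52.0, 285.0, 75.0]})
print(foods_near(sample, 60))  # keeps Apple and Soup for these made-up rows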
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284646.ipynb
food-and-their-calories
vaishnavivenkatesan
[{"Id": 129284646, "ScriptId": 38343193, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9946883, "CreationDate": "05/12/2023 12:51:41", "VersionNumber": 1.0, "Title": "notebook20825d17fb", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 95.0, "LinesInsertedFromPrevious": 95.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185188691, "KernelVersionId": 129284646, "SourceDatasetVersionId": 1588653}]
[{"Id": 1588653, "DatasetId": 937303, "DatasourceVersionId": 1623908, "CreatorUserId": 5592707, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "10/24/2020 11:45:12", "VersionNumber": 1.0, "Title": "Food and their calories", "Slug": "food-and-their-calories", "Subtitle": "Variety of food and their calories based on serving", "Description": "### Context\n\nThe data set consist of food such as soup,ice-cream,pizza,vegetables,fruits etc, the serving for which the calories are calculated.\n\n\n### Content\n\nThere are three columns for this dataset: Food Serving Calories\n\n\n### Inspiration\n\nWe would always wanted to prepare the diet chart based in the calories. This the dataset then.\ud83d\ude04", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 937303, "CreatorUserId": 5592707, "OwnerUserId": 5592707.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1588653.0, "CurrentDatasourceVersionId": 1623908.0, "ForumId": 953334, "Type": 2, "CreationDate": "10/24/2020 11:45:12", "LastActivityDate": "10/24/2020", "TotalViews": 22731, "TotalDownloads": 2284, "TotalVotes": 28, "TotalKernels": 3}]
[{"Id": 5592707, "UserName": "vaishnavivenkatesan", "DisplayName": "Vaishnavi", "RegisterDate": "08/08/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import random import re def extract_numeric_calories(calories_string): numeric_calories = re.findall(r"\d+", calories_string) if numeric_calories: return int(numeric_calories[0]) else: return 0 def suggest_foods(predicted_calories, food_dataset, num_suggestions=5): predicted_calories_numeric = extract_numeric_calories(predicted_calories) filtered_foods = food_dataset[ food_dataset["Calories"].apply(extract_numeric_calories) == predicted_calories_numeric ] if len(filtered_foods) >= num_suggestions: suggestions = random.sample(list(filtered_foods["Food"]), num_suggestions) return suggestions elif len(filtered_foods) > 0: suggestions = list(filtered_foods["Food"]) return suggestions else: return [] predicted_calories = "60 cals" food_dataset = pd.read_csv( "/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv" ) suggested_foods = suggest_foods(predicted_calories, food_dataset) if suggested_foods: print("Suggested foods for", predicted_calories, "calories:") for food in suggested_foods: print(food) else: print("No food options available for the specified calorie value.") import pandas as pd import re from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import MinMaxScaler from sklearn.neighbors import NearestNeighbors food_df = pd.read_csv( "/kaggle/input/food-and-their-calories/Food and Calories - Sheet1.csv" ) food_df["Calories"] = ( food_df["Calories"].apply(lambda x: re.findall(r"\d+", x)[0]).astype(float) ) scaler = MinMaxScaler() food_df["Calories"] = scaler.fit_transform(food_df["Calories"].values.reshape(-1, 1)) predicted_calories = 4 filtered_foods = food_df[ (food_df["Calories"] >= predicted_calories) & (food_df["Calories"] <= predicted_calories) ] if len(filtered_foods) > 0: food_vectors = filtered_foods.drop(["Food", "Calories"], axis=1) cosine_sim = cosine_similarity(food_vectors) k = 5 nn = NearestNeighbors(n_neighbors=k, metric="precomputed") nn.fit(cosine_sim) top_food_indices = nn.kneighbors([cosine_sim[0]], return_distance=False) recommended_foods = filtered_foods.iloc[top_food_indices[0]] for _, food in recommended_foods.iterrows(): print(f"Food: {food['Food']}") print(f"Calories: {food['Calories']}") print() else: print("No foods found within the desired calorie range.")
false
1
932
0
1,052
932
129284987
<jupyter_start><jupyter_text>Video Game Sales This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1]. Fields include * Rank - Ranking of overall sales * Name - The games name * Platform - Platform of the games release (i.e. PC,PS4, etc.) * Year - Year of the game's release * Genre - Genre of the game * Publisher - Publisher of the game * NA_Sales - Sales in North America (in millions) * EU_Sales - Sales in Europe (in millions) * JP_Sales - Sales in Japan (in millions) * Other_Sales - Sales in the rest of the world (in millions) * Global_Sales - Total worldwide sales. The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape. It is based on BeautifulSoup using Python. There are 16,598 records. 2 records were dropped due to incomplete information. [1]: http://www.vgchartz.com/ Kaggle dataset identifier: videogamesales <jupyter_code>import pandas as pd df = pd.read_csv('videogamesales/vgsales.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <jupyter_text>Examples: { "Rank": 1, "Name": "Wii Sports", "Platform": "Wii", "Year": 2006, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 41.49, "EU_Sales": 29.02, "JP_Sales": 3.77, "Other_Sales": 8.46, "Global_Sales": 82.74 } { "Rank": 2, "Name": "Super Mario Bros.", "Platform": "NES", "Year": 1985, "Genre": "Platform", "Publisher": "Nintendo", "NA_Sales": 29.08, "EU_Sales": 3.58, "JP_Sales": 6.8100000000000005, "Other_Sales": 0.77, "Global_Sales": 40.24 } { "Rank": 3, "Name": "Mario Kart Wii", "Platform": "Wii", "Year": 2008, "Genre": "Racing", "Publisher": "Nintendo", "NA_Sales": 15.85, "EU_Sales": 12.88, "JP_Sales": 3.79, "Other_Sales": 3.31, "Global_Sales": 35.82 } { "Rank": 4, "Name": "Wii Sports Resort", "Platform": "Wii", "Year": 2009, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 15.75, "EU_Sales": 11.01, "JP_Sales": 3.2800000000000002, "Other_Sales": 2.96, "Global_Sales": 33.0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df game_over_ten_thousand = df[df["Global_Sales"] > 0.01] game_over_ten_thousand # # Which company is the most common video game publisher? 
video_game_publisher = game_over_ten_thousand["Publisher"].mode() video_game_publisher[0] # # What’s the most common platform? video_game_platform = game_over_ten_thousand["Platform"].mode() video_game_platform[0] # # What about the most common genre? video_game_genre = game_over_ten_thousand["Genre"].mode() video_game_genre[0] # # What are the top 20 highest grossing games? highest_grossing_games = df.nlargest(20, "Global_Sales") highest_grossing_games # # For North American video game sales, what’s the median? # ### Provide a secondary output showing ten games surrounding the median sales output # ### Assume that games with same median value are sorted in descending order. # sorted_NA = game_over_ten_thousand.sort_values(["NA_Sales"], ascending=False) median = game_over_ten_thousand["NA_Sales"].median() mid_row = len(sorted_NA) // 2 ten_games = sorted_NA.iloc[mid_row - 4 : mid_row + 5] print("the median is: ", median) ten_games # # For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? max_Global_Sales = game_over_ten_thousand["Global_Sales"].max() row_for_max_global_sales = game_over_ten_thousand[ game_over_ten_thousand["Global_Sales"] == max_Global_Sales ] max_NA_sales = row_for_max_global_sales["NA_Sales"] max_NA_sales_value = max_NA_sales[0] max_NA_sales_value mean_sales_NA = game_over_ten_thousand["NA_Sales"].mean() mean_sales_NA std_for_sales_NA = game_over_ten_thousand["NA_Sales"].std() std_for_sales_NA result = (max_NA_sales_value - mean_sales_NA) / std_for_sales_NA result # # The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? filter_platforms = game_over_ten_thousand.groupby("Platform")["Global_Sales"] filter_platforms average_of_Wii = filter_platforms.mean()["Wii"] average_of_averages = filter_platforms.mean().mean() average_of_averages if average_of_Wii > average_of_averages: print( f"the average of Wii is {average_of_Wii} that is bigger than the averages of other platforms" ) else: print( f"the average of Wii is {average_of_Wii} that is less than the averages of other platforms" ) # # Come up with 3 more questions that can be answered with this data set. # ### what is the most commen genre in EU?¶ # video_game_publisher = game_over_ten_thousand.groupby("Genre") video_game_publisher["EU_Sales"].count().sort_values(ascending=False).head(1) # ### what is the top 10 highest games sales in Eu? # highest_grossing_games = df.nlargest(10, "EU_Sales") highest_grossing_games # ### what is the lowest company on sales in EU? # highest_grossing_games = df.sort_values("EU_Sales").head(1) highest_grossing_games
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/284/129284987.ipynb
videogamesales
gregorut
[{"Id": 129284987, "ScriptId": 38345080, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15001180, "CreationDate": "05/12/2023 12:54:07", "VersionNumber": 2.0, "Title": "notebook62d2995610", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 90.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185189082, "KernelVersionId": 129284987, "SourceDatasetVersionId": 618}]
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df game_over_ten_thousand = df[df["Global_Sales"] > 0.01] game_over_ten_thousand # # Which company is the most common video game publisher? video_game_publisher = game_over_ten_thousand["Publisher"].mode() video_game_publisher[0] # # What’s the most common platform? video_game_platform = game_over_ten_thousand["Platform"].mode() video_game_platform[0] # # What about the most common genre? video_game_genre = game_over_ten_thousand["Genre"].mode() video_game_genre[0] # # What are the top 20 highest grossing games? highest_grossing_games = df.nlargest(20, "Global_Sales") highest_grossing_games # # For North American video game sales, what’s the median? # ### Provide a secondary output showing ten games surrounding the median sales output # ### Assume that games with same median value are sorted in descending order. # sorted_NA = game_over_ten_thousand.sort_values(["NA_Sales"], ascending=False) median = game_over_ten_thousand["NA_Sales"].median() mid_row = len(sorted_NA) // 2 ten_games = sorted_NA.iloc[mid_row - 4 : mid_row + 5] print("the median is: ", median) ten_games # # For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? max_Global_Sales = game_over_ten_thousand["Global_Sales"].max() row_for_max_global_sales = game_over_ten_thousand[ game_over_ten_thousand["Global_Sales"] == max_Global_Sales ] max_NA_sales = row_for_max_global_sales["NA_Sales"] max_NA_sales_value = max_NA_sales[0] max_NA_sales_value mean_sales_NA = game_over_ten_thousand["NA_Sales"].mean() mean_sales_NA std_for_sales_NA = game_over_ten_thousand["NA_Sales"].std() std_for_sales_NA result = (max_NA_sales_value - mean_sales_NA) / std_for_sales_NA result # # The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? filter_platforms = game_over_ten_thousand.groupby("Platform")["Global_Sales"] filter_platforms average_of_Wii = filter_platforms.mean()["Wii"] average_of_averages = filter_platforms.mean().mean() average_of_averages if average_of_Wii > average_of_averages: print( f"the average of Wii is {average_of_Wii} that is bigger than the averages of other platforms" ) else: print( f"the average of Wii is {average_of_Wii} that is less than the averages of other platforms" ) # # Come up with 3 more questions that can be answered with this data set. # ### what is the most commen genre in EU?¶ # video_game_publisher = game_over_ten_thousand.groupby("Genre") video_game_publisher["EU_Sales"].count().sort_values(ascending=False).head(1) # ### what is the top 10 highest games sales in Eu? # highest_grossing_games = df.nlargest(10, "EU_Sales") highest_grossing_games # ### what is the lowest company on sales in EU? 
# highest_grossing_games = df.sort_values("EU_Sales").head(1) highest_grossing_games
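# Small note on the median question above: the slice sorted_NA.iloc[mid_row - 4 : mid_row + 5]
# returns nine rows, while the markdown asks for ten games surrounding the median. A sketch of
# a ten-row window, reusing sorted_NA and mid_row exactly as defined in the script above:
ten_games = sorted_NA.iloc[mid_row - 5 : mid_row + 5]  # 10 rows: five below the midpoint, the midpoint, four above
len(ten_games)  # -> 10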
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
true
1
<start_data_description><data_path>videogamesales/vgsales.csv: <column_names> ['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales'] <column_types> {'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'} <dataframe_Summary> {'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}} <dataframe_info> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <some_examples> {'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}} <end_description>
1,167
0
2,281
1,167
129367548
<jupyter_start><jupyter_text>daun_jagung_dataset Kaggle dataset identifier: daun-jagung-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import tensorflow as tf from tensorflow.keras import models, layers import matplotlib.pyplot as plt IMAGE_SIZE = 256 BATCH_SIZE = 32 CHANNELS = 3 EPOCHS = 40 dataset = tf.keras.preprocessing.image_dataset_from_directory( "/kaggle/input/second-maize-dataset/corn disease", shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, ) class_name = dataset.class_names class_name import matplotlib.image as mpimg for image_batch, labels_batch in dataset.take(1): print(image_batch.shape) print(labels_batch.numpy()) # **VISUALISASI BEBERAPA IMAGE DARI DATA** plt.figure(figsize=(15, 15)) for image_batch, labels_batch in dataset.take(1): for i in range(12): ax = plt.subplot(3, 4, i + 1) plt.imshow(image_batch[i].numpy().astype("uint8")) plt.title(class_name[labels_batch[i]]) plt.axis("off") # **Function Membagi Dataset** # Dataset dibagi menjadi 3 set. # 1. Training: Dataset yang digunakan untuk pelatihan. # 2. Validasi: Dataset yang akan diuji saat pelatihan. # 3. Test: Data yang akan diuji saat melatih model. len(dataset) # 80% ==> training # 20% ==> 10% validation, 10% test train_size = 0.8 len(dataset) * train_size train_ds = dataset.take(109) len(train_ds) test_ds = dataset.skip(109) len(test_ds) val_size = 0.1 len(dataset) * val_size val_ds = test_ds.take(13) len(val_ds) test_ds = test_ds.skip(13) len(test_ds) def get_dataset_partitions_tf( ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000 ): ds_size = len(ds) if shuffle: ds = ds.shuffle(shuffle_size, seed=12) train_size = int(train_split * ds_size) val_size = int(val_split * ds_size) train_ds = ds.take(train_size) val_ds = ds.skip(train_size).take(val_size) test_ds = ds.skip(train_size).take(val_size) return train_ds, val_ds, test_ds train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset) len(train_ds) len(val_ds) len(test_ds) train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) # **MEMBUAT MODEL** # **membuat layer untuk mengubah ukuran dan normalisasi** # Sebelum memasukkan gambar ke jaringan, kita harus mengubah ukurannya ke ukuran yang diinginkan. Selain itu, untuk meningkatkan performa model, kita harus menormalkan nilai piksel gambar resize_and_rescale = tf.keras.Sequential( [ layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.experimental.preprocessing.Rescaling(1.0 / 225), ] ) # **Data Augmentation** # 1. RandomFlip: horizontal and vertical # 2. 
RandomRotation: 0.2 data_augmentation = tf.keras.Sequential( [ layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"), layers.experimental.preprocessing.RandomRotation(0.2), ] ) from tensorflow.keras import activations # **ARSITEKTUR MODEL** input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS) n_classes = 4 model = models.Sequential( [ resize_and_rescale, data_augmentation, layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), # # 1 # layers.Conv2D(64, kernel_size = (3,3), activation ='relu'), # layers.MaxPooling2D((2,2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.Dense(64, activation="relu"), layers.Dense(n_classes, activation="softmax"), ] ) model.build(input_shape=input_shape) model.summary() # **optimasi ADAM** model.compile( optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=["accuracy"], ) history = model.fit( train_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds ) scores = model.evaluate(test_ds) scores # **Plotting akurasi dan loss** history.params history.history.keys() acc = history.history["accuracy"] val_acc = history.history["val_accuracy"] loss = history.history["loss"] val_loss = history.history["val_loss"] plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(range(EPOCHS), acc, label="Training Accuracy") plt.plot(range(EPOCHS), val_acc, label="Validation Accuracy") plt.legend(loc="lower right") plt.title("Training and Validation Accuracy") plt.subplot(1, 2, 2) plt.plot(range(EPOCHS), loss, label="Training Loss") plt.plot(range(EPOCHS), val_loss, label="Validation Loss") plt.legend(loc="upper right") plt.title("Training and Validation Loss") # **menjalankan prediksi pada sebuah gambar** for ( images_batch, labels_batch, ) in test_ds.take(1): first_image = images_batch[0].numpy().astype("uint8") first_label = labels_batch[0].numpy() print("first image to predict") plt.imshow(first_image) print("actual label:", class_name[first_label]) batch_prediction = model.predict(images_batch) print("predicted label:", class_name[np.argmax(batch_prediction[0])]) # **menulis Fungsi untuk kesimpulan deteksi gambar** def predict(model, img): img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy()) img_array = tf.expand_dims(img_array, 0) # create a batch predictions = model.predict(img_array) predicted_class = class_name[np.argmax(predictions[0])] confidence = round(100 * (np.max(predictions[0])), 2) return predicted_class, confidence # **Menjalankan Prediksi kesimpulan pada kumpulan gambar** plt.figure(figsize=(15, 15)) for images, labels in test_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) predicted_class, confidence = predict(model, images[i].numpy()) actual_class = class_name[labels[i]] plt.title( f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%" ) plt.axis("off") # model_version=1 # model.save(f"../models/{model_version}") import joblib # joblib.dump(model,'knn_joblib') filename = "model2" model.save(filename) # SAVE MODEL filename = "model.h5" 
model.save(filename) # SAVE MODEL filename = "model.tflite" model.save(filename) # SAVE MODEL filename = "modeltf.tflite" model.save(filename) import tensorflow as tf tf.keras.models.save_model(model, "model.pbtxt") converter = tf.lite.TFLiteConverter.from_keras_model(model=model) model_tflite = converter.convert() open("MachineLearningModel.tflite", "wb").write(model_tflite) # import tensorflow as tf # # Convert the model # converter = tf.lite.TFLiteConverter.from_saved_model(/kaggle/working/model.h5) # path to the SavedModel directory # tflite_model = converter.convert() # # Save the model. # with open('modell.tflite', 'wb') as f: # f.write(tflite_model) # # Save the model. # with open('model.tflite', 'wb') as f: # f.write(tflite_model)
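# One detail worth flagging in the model above: the Rescaling layer divides by 225, while
# 8-bit pixel values range from 0 to 255, so inputs end up slightly above 1.0. A sketch of
# the same preprocessing block with the conventional constant (IMAGE_SIZE as defined at the
# top of the script; only the divisor changes):
import tensorflow as tf
from tensorflow.keras import layers

IMAGE_SIZE = 256

resize_and_rescale = tf.keras.Sequential(
    [
        layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        layers.experimental.preprocessing.Rescaling(1.0 / 255),  # map [0, 255] to [0, 1]
    ]
)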
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/367/129367548.ipynb
daun-jagung-dataset
andril22
[{"Id": 129367548, "ScriptId": 33547736, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9166171, "CreationDate": "05/13/2023 07:25:54", "VersionNumber": 1.0, "Title": "SimpleCnn_jadi", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 300.0, "LinesInsertedFromPrevious": 300.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185349007, "KernelVersionId": 129367548, "SourceDatasetVersionId": 4737965}]
[{"Id": 4737965, "DatasetId": 2741726, "DatasourceVersionId": 4800941, "CreatorUserId": 9166171, "LicenseName": "Unknown", "CreationDate": "12/18/2022 02:22:29", "VersionNumber": 1.0, "Title": "daun_jagung_dataset", "Slug": "daun-jagung-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2741726, "CreatorUserId": 9166171, "OwnerUserId": 9166171.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4737965.0, "CurrentDatasourceVersionId": 4800941.0, "ForumId": 2775198, "Type": 2, "CreationDate": "12/18/2022 02:22:29", "LastActivityDate": "12/18/2022", "TotalViews": 267, "TotalDownloads": 13, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 9166171, "UserName": "andril22", "DisplayName": "Andril22_", "RegisterDate": "12/14/2021", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import tensorflow as tf from tensorflow.keras import models, layers import matplotlib.pyplot as plt IMAGE_SIZE = 256 BATCH_SIZE = 32 CHANNELS = 3 EPOCHS = 40 dataset = tf.keras.preprocessing.image_dataset_from_directory( "/kaggle/input/second-maize-dataset/corn disease", shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, ) class_name = dataset.class_names class_name import matplotlib.image as mpimg for image_batch, labels_batch in dataset.take(1): print(image_batch.shape) print(labels_batch.numpy()) # **VISUALISASI BEBERAPA IMAGE DARI DATA** plt.figure(figsize=(15, 15)) for image_batch, labels_batch in dataset.take(1): for i in range(12): ax = plt.subplot(3, 4, i + 1) plt.imshow(image_batch[i].numpy().astype("uint8")) plt.title(class_name[labels_batch[i]]) plt.axis("off") # **Function Membagi Dataset** # Dataset dibagi menjadi 3 set. # 1. Training: Dataset yang digunakan untuk pelatihan. # 2. Validasi: Dataset yang akan diuji saat pelatihan. # 3. Test: Data yang akan diuji saat melatih model. len(dataset) # 80% ==> training # 20% ==> 10% validation, 10% test train_size = 0.8 len(dataset) * train_size train_ds = dataset.take(109) len(train_ds) test_ds = dataset.skip(109) len(test_ds) val_size = 0.1 len(dataset) * val_size val_ds = test_ds.take(13) len(val_ds) test_ds = test_ds.skip(13) len(test_ds) def get_dataset_partitions_tf( ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000 ): ds_size = len(ds) if shuffle: ds = ds.shuffle(shuffle_size, seed=12) train_size = int(train_split * ds_size) val_size = int(val_split * ds_size) train_ds = ds.take(train_size) val_ds = ds.skip(train_size).take(val_size) test_ds = ds.skip(train_size).take(val_size) return train_ds, val_ds, test_ds train_ds, val_ds, test_ds = get_dataset_partitions_tf(dataset) len(train_ds) len(val_ds) len(test_ds) train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE) # **MEMBUAT MODEL** # **membuat layer untuk mengubah ukuran dan normalisasi** # Sebelum memasukkan gambar ke jaringan, kita harus mengubah ukurannya ke ukuran yang diinginkan. Selain itu, untuk meningkatkan performa model, kita harus menormalkan nilai piksel gambar resize_and_rescale = tf.keras.Sequential( [ layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.experimental.preprocessing.Rescaling(1.0 / 225), ] ) # **Data Augmentation** # 1. RandomFlip: horizontal and vertical # 2. 
RandomRotation: 0.2 data_augmentation = tf.keras.Sequential( [ layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"), layers.experimental.preprocessing.RandomRotation(0.2), ] ) from tensorflow.keras import activations # **ARSITEKTUR MODEL** input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNELS) n_classes = 4 model = models.Sequential( [ resize_and_rescale, data_augmentation, layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), # # 1 # layers.Conv2D(64, kernel_size = (3,3), activation ='relu'), # layers.MaxPooling2D((2,2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation="relu"), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.Dense(64, activation="relu"), layers.Dense(n_classes, activation="softmax"), ] ) model.build(input_shape=input_shape) model.summary() # **optimasi ADAM** model.compile( optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=["accuracy"], ) history = model.fit( train_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, validation_data=val_ds ) scores = model.evaluate(test_ds) scores # **Plotting akurasi dan loss** history.params history.history.keys() acc = history.history["accuracy"] val_acc = history.history["val_accuracy"] loss = history.history["loss"] val_loss = history.history["val_loss"] plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(range(EPOCHS), acc, label="Training Accuracy") plt.plot(range(EPOCHS), val_acc, label="Validation Accuracy") plt.legend(loc="lower right") plt.title("Training and Validation Accuracy") plt.subplot(1, 2, 2) plt.plot(range(EPOCHS), loss, label="Training Loss") plt.plot(range(EPOCHS), val_loss, label="Validation Loss") plt.legend(loc="upper right") plt.title("Training and Validation Loss") # **menjalankan prediksi pada sebuah gambar** for ( images_batch, labels_batch, ) in test_ds.take(1): first_image = images_batch[0].numpy().astype("uint8") first_label = labels_batch[0].numpy() print("first image to predict") plt.imshow(first_image) print("actual label:", class_name[first_label]) batch_prediction = model.predict(images_batch) print("predicted label:", class_name[np.argmax(batch_prediction[0])]) # **menulis Fungsi untuk kesimpulan deteksi gambar** def predict(model, img): img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy()) img_array = tf.expand_dims(img_array, 0) # create a batch predictions = model.predict(img_array) predicted_class = class_name[np.argmax(predictions[0])] confidence = round(100 * (np.max(predictions[0])), 2) return predicted_class, confidence # **Menjalankan Prediksi kesimpulan pada kumpulan gambar** plt.figure(figsize=(15, 15)) for images, labels in test_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) predicted_class, confidence = predict(model, images[i].numpy()) actual_class = class_name[labels[i]] plt.title( f"Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%" ) plt.axis("off") # model_version=1 # model.save(f"../models/{model_version}") import joblib # joblib.dump(model,'knn_joblib') filename = "model2" model.save(filename) # SAVE MODEL filename = "model.h5" 
model.save(filename)  # SAVE MODEL (this writes a Keras HDF5 file)

# model.save() always produces a Keras/SavedModel artefact; giving the file a
# ".tflite" or ".pbtxt" name does not convert it. The TFLite flatbuffer has to
# come from the converter:
converter = tf.lite.TFLiteConverter.from_keras_model(model)
model_tflite = converter.convert()
open("MachineLearningModel.tflite", "wb").write(model_tflite)

# Alternative: convert from the SavedModel directory saved above ("model2").
# converter = tf.lite.TFLiteConverter.from_saved_model("/kaggle/working/model2")
# tflite_model = converter.convert()
# with open("model.tflite", "wb") as f:
#     f.write(tflite_model)
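A note on the resize-and-rescale block in the notebook above: it divides pixel values by 225, which is presumably a typo for 255, since 8-bit images span 0 to 255. A minimal standalone sketch of the usual preprocessing pair follows; IMAGE_SIZE mirrors the notebook, the dummy batch is purely illustrative, and on older TensorFlow versions these layers live under layers.experimental.preprocessing rather than layers.

import tensorflow as tf
from tensorflow.keras import layers

IMAGE_SIZE = 256  # same target size as the notebook

# Resize to a fixed spatial size, then scale 8-bit pixel values into [0, 1].
resize_and_rescale = tf.keras.Sequential([
    layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
    layers.Rescaling(1.0 / 255),
])

# Quick sanity check on a random image-like batch.
dummy = tf.random.uniform((2, 300, 300, 3), maxval=256, dtype=tf.int32)
out = resize_and_rescale(tf.cast(dummy, tf.float32))
print(out.shape, float(tf.reduce_max(out)))  # (2, 256, 256, 3), max <= 1.0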
false
0
2,690
0
2,720
2,690
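One more remark on the notebook above: its get_dataset_partitions_tf helper builds both val_ds and test_ds from ds.skip(train_size).take(val_size), so the validation and test splits overlap instead of being disjoint. A corrected, self-contained sketch is below (toy dataset; the 80/10/10 ratios follow the notebook, the rest is illustrative).

import tensorflow as tf

def split_dataset(ds, train_split=0.8, val_split=0.1, shuffle=True, seed=12):
    ds_size = int(ds.cardinality().numpy())
    if shuffle:
        # Fix the shuffle once so take()/skip() always see the same order.
        ds = ds.shuffle(ds_size, seed=seed, reshuffle_each_iteration=False)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size + val_size)  # the remainder, disjoint from val
    return train_ds, val_ds, test_ds

toy = tf.data.Dataset.range(100)
train_ds, val_ds, test_ds = split_dataset(toy)
print(len(list(train_ds)), len(list(val_ds)), len(list(test_ds)))  # 80 10 10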
129367151
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import json train_xl = pd.read_json( "/kaggle/input/train-data/computers_train_xlarge.json", lines=True ) train_xl.head() # # **Are there null data in the dataset?** # print("Are there any null values?") print(train_xl.isnull().values.any(), "\n") # Find out how many - This gives you the breakdown per column print("How many null values are in each column?"), print(train_xl.isnull().sum(), "\n") # Get total overall null values print("How many null values are in the data in total?") print(train_xl.isnull().sum().sum()) # #### Removing all null values will make the dataset too small for training algorithm. # I will remove some columns that have too many null values. # Delete columns price_left' , 'price_right', 'specTableContent_left', 'specTableContent_right','keyValuePairs_left','keyValuePairs_right', 'id_left' , 'id_right', 'pair_id' train_xl = train_xl.drop( [ "price_left", "price_right", "specTableContent_left", "specTableContent_right", "keyValuePairs_left", "keyValuePairs_right", "id_left", "id_right", "pair_id", ], axis=1, ) train_xl = train_xl.dropna() train_xl.head(2) train_data = train_xl.drop(["label"], axis=1) label = train_xl[["label"]] import re # Remove punctuation and apply case folding def preprocessor(text): text = re.sub("<[^>]*>", "", text) emoticons = re.findall("(?::|;|=)(?:-_)?(?:\)|\(|D|P)", text) text = re.sub("[\W]+", " ", text.lower()) + " ".join(emoticons).replace("-", "") return text # apply the preprocessor to all description_left train_data["category_left"] = train_data["category_left"].apply(preprocessor) train_data["category_right"] = train_data["category_right"].apply(preprocessor) train_data["brand_left"] = train_data["brand_left"].apply(preprocessor) train_data["brand_right"] = train_data["brand_right"].apply(preprocessor) train_data["description_left"] = train_data["description_left"].apply(preprocessor) train_data["description_right"] = train_data["description_right"].apply(preprocessor) train_data["title_left"] = train_data["title_left"].apply(preprocessor) train_data["title_right"] = train_data["title_right"].apply(preprocessor) train_data.head() # Copy the Title column of the dataframe into a numpy array train_data["category_left"] = train_data["category_left"].to_numpy() train_data["category_right"] = train_data["category_right"].to_numpy() train_data["brand_left"] = train_data["brand_left"].to_numpy() train_data["brand_right"] = train_data["brand_right"].to_numpy() train_data["description_left"] = train_data["description_left"].to_numpy() train_data["description_right"] = train_data["description_right"].to_numpy() train_data["title_left"] = train_data["title_left"].to_numpy() train_data["title_right"] = train_data["title_right"].to_numpy() # view the data train_data.head() train_data["title_right"]
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/367/129367151.ipynb
null
null
[{"Id": 129367151, "ScriptId": 38239302, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7523258, "CreationDate": "05/13/2023 07:21:01", "VersionNumber": 2.0, "Title": "Product Matching Using Logistics Regression", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 91.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 43.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import json train_xl = pd.read_json( "/kaggle/input/train-data/computers_train_xlarge.json", lines=True ) train_xl.head() # # **Are there null data in the dataset?** # print("Are there any null values?") print(train_xl.isnull().values.any(), "\n") # Find out how many - This gives you the breakdown per column print("How many null values are in each column?"), print(train_xl.isnull().sum(), "\n") # Get total overall null values print("How many null values are in the data in total?") print(train_xl.isnull().sum().sum()) # #### Removing all null values will make the dataset too small for training algorithm. # I will remove some columns that have too many null values. # Delete columns price_left' , 'price_right', 'specTableContent_left', 'specTableContent_right','keyValuePairs_left','keyValuePairs_right', 'id_left' , 'id_right', 'pair_id' train_xl = train_xl.drop( [ "price_left", "price_right", "specTableContent_left", "specTableContent_right", "keyValuePairs_left", "keyValuePairs_right", "id_left", "id_right", "pair_id", ], axis=1, ) train_xl = train_xl.dropna() train_xl.head(2) train_data = train_xl.drop(["label"], axis=1) label = train_xl[["label"]] import re # Remove punctuation and apply case folding def preprocessor(text): text = re.sub("<[^>]*>", "", text) emoticons = re.findall("(?::|;|=)(?:-_)?(?:\)|\(|D|P)", text) text = re.sub("[\W]+", " ", text.lower()) + " ".join(emoticons).replace("-", "") return text # apply the preprocessor to all description_left train_data["category_left"] = train_data["category_left"].apply(preprocessor) train_data["category_right"] = train_data["category_right"].apply(preprocessor) train_data["brand_left"] = train_data["brand_left"].apply(preprocessor) train_data["brand_right"] = train_data["brand_right"].apply(preprocessor) train_data["description_left"] = train_data["description_left"].apply(preprocessor) train_data["description_right"] = train_data["description_right"].apply(preprocessor) train_data["title_left"] = train_data["title_left"].apply(preprocessor) train_data["title_right"] = train_data["title_right"].apply(preprocessor) train_data.head() # Copy the Title column of the dataframe into a numpy array train_data["category_left"] = train_data["category_left"].to_numpy() train_data["category_right"] = train_data["category_right"].to_numpy() train_data["brand_left"] = train_data["brand_left"].to_numpy() train_data["brand_right"] = train_data["brand_right"].to_numpy() train_data["description_left"] = train_data["description_left"].to_numpy() train_data["description_right"] = train_data["description_right"].to_numpy() train_data["title_left"] = train_data["title_left"].to_numpy() train_data["title_right"] = train_data["title_right"].to_numpy() # view the data train_data.head() train_data["title_right"]
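The preprocessor above strips HTML tags, keeps emoticons, lowercases and collapses punctuation. A quick standalone check makes its behaviour concrete (the sample string is invented); note that with this exact pattern a "nosed" emoticon such as ":-)" is not captured, because the optional group matches the two characters "-_" rather than a lone "-".

import re

def preprocessor(text):
    text = re.sub("<[^>]*>", "", text)
    emoticons = re.findall("(?::|;|=)(?:-_)?(?:\)|\(|D|P)", text)
    text = re.sub("[\W]+", " ", text.lower()) + " ".join(emoticons).replace("-", "")
    return text

print(preprocessor("<b>HP EliteBook 840 G3</b> 14-inch Notebook :)"))
# -> "hp elitebook 840 g3 14 inch notebook :)"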
false
0
1,063
0
1,063
1,063
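The writeup is titled "Product Matching Using Logistics Regression", but the notebook stops after cleaning the paired text columns. One common continuation is to vectorise both sides with a shared vocabulary and feed the stacked features to a classifier. The sketch below is a hypothetical illustration on invented rows, not the author's pipeline; only the column names title_left, title_right and label are taken from the notebook.

import pandas as pd
import scipy.sparse as sp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

pairs = pd.DataFrame({
    "title_left": ["hp elitebook 840 g3", "dell xps 13", "lenovo thinkpad t480", "asus zenbook 14"],
    "title_right": ["hp elitebook 840", "dell xps 13 9360", "apple macbook pro", "asus zenbook um425"],
    "label": [1, 1, 0, 1],
})

# One vocabulary over both sides, then the two TF-IDF blocks stacked per pair.
vec = TfidfVectorizer().fit(pd.concat([pairs["title_left"], pairs["title_right"]]))
X = sp.hstack([vec.transform(pairs["title_left"]), vec.transform(pairs["title_right"])])

clf = LogisticRegression(max_iter=1000).fit(X, pairs["label"])
print(clf.predict(X))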
129897023
import pandas as pd df = pd.read_csv("/kaggle/input/body-sensors/left_arm_raw.csv") df greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100 between_30_60 = ( (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60) ).mean() * 100 less_than_30 = (df["Elevation_angle"] < 30).mean() * 100 average_angle = df["Elevation_angle"].mean() print(f"Percentage of angles > 60: {greater_than_60:.2f}%") print(f"Percentage of angles between 30 and 60: {between_30_60:.2f}%") print(f"Percentage of angles < 30: {less_than_30:.2f}%") print(f"Average angle: {average_angle:.2f}") df["Duration"] = df["Timestamp_Android"].diff() # Calculate the duration of angles greater than 60 greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum() # Calculate the duration of angles between 30 and 60 between_30_60_duration = df.loc[ (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration" ].sum() # Calculate the duration of angles less than 30 less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum() # Calculate the total duration total_duration = df["Duration"].sum() print(f"Duration of angles > 60: {greater_than_60_duration:.2f} seconds") print(f"Duration of angles between 30 and 60: {between_30_60_duration:.2f} seconds") print(f"Duration of angles < 30: {less_than_30_duration:.2f} seconds") print(f"Total duration: {total_duration:.2f} seconds") def calculate_stats(file): df = pd.read_csv(file) greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100 between_30_60 = ( (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60) ).mean() * 100 less_than_30 = (df["Elevation_angle"] < 30).mean() * 100 average_angle = df["Elevation_angle"].mean() df["Duration"] = df["Timestamp_Android"].diff() greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum() between_30_60_duration = df.loc[ (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration" ].sum() less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum() total_duration = df["Duration"].sum() response = { "angles": { "greater_than_60": greater_than_60, "between_30_60": between_30_60, "less_than_30": less_than_30, "average": average_angle, }, "duration": { "greater_than_60_duration": greater_than_60_duration, "between_30_60_duration": between_30_60_duration, "less_than_30_duration": less_than_30_duration, "duration": total_duration, }, } return response test = calculate_stats("/kaggle/input/body-sensors/left_arm_raw.csv") print(test) test2 = calculate_stats("/kaggle/input/body-sensors/right_arm_raw.csv") print(test2) test3 = calculate_stats("/kaggle/input/body-sensors/trunk_raw.csv") print(test3)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897023.ipynb
null
null
[{"Id": 129897023, "ScriptId": 38638072, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7117453, "CreationDate": "05/17/2023 09:24:43", "VersionNumber": 1.0, "Title": "notebook52c71eb284", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 74.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd df = pd.read_csv("/kaggle/input/body-sensors/left_arm_raw.csv") df greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100 between_30_60 = ( (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60) ).mean() * 100 less_than_30 = (df["Elevation_angle"] < 30).mean() * 100 average_angle = df["Elevation_angle"].mean() print(f"Percentage of angles > 60: {greater_than_60:.2f}%") print(f"Percentage of angles between 30 and 60: {between_30_60:.2f}%") print(f"Percentage of angles < 30: {less_than_30:.2f}%") print(f"Average angle: {average_angle:.2f}") df["Duration"] = df["Timestamp_Android"].diff() # Calculate the duration of angles greater than 60 greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum() # Calculate the duration of angles between 30 and 60 between_30_60_duration = df.loc[ (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration" ].sum() # Calculate the duration of angles less than 30 less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum() # Calculate the total duration total_duration = df["Duration"].sum() print(f"Duration of angles > 60: {greater_than_60_duration:.2f} seconds") print(f"Duration of angles between 30 and 60: {between_30_60_duration:.2f} seconds") print(f"Duration of angles < 30: {less_than_30_duration:.2f} seconds") print(f"Total duration: {total_duration:.2f} seconds") def calculate_stats(file): df = pd.read_csv(file) greater_than_60 = (df["Elevation_angle"] > 60).mean() * 100 between_30_60 = ( (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60) ).mean() * 100 less_than_30 = (df["Elevation_angle"] < 30).mean() * 100 average_angle = df["Elevation_angle"].mean() df["Duration"] = df["Timestamp_Android"].diff() greater_than_60_duration = df.loc[df["Elevation_angle"] > 60, "Duration"].sum() between_30_60_duration = df.loc[ (df["Elevation_angle"] >= 30) & (df["Elevation_angle"] <= 60), "Duration" ].sum() less_than_30_duration = df.loc[df["Elevation_angle"] < 30, "Duration"].sum() total_duration = df["Duration"].sum() response = { "angles": { "greater_than_60": greater_than_60, "between_30_60": between_30_60, "less_than_30": less_than_30, "average": average_angle, }, "duration": { "greater_than_60_duration": greater_than_60_duration, "between_30_60_duration": between_30_60_duration, "less_than_30_duration": less_than_30_duration, "duration": total_duration, }, } return response test = calculate_stats("/kaggle/input/body-sensors/left_arm_raw.csv") print(test) test2 = calculate_stats("/kaggle/input/body-sensors/right_arm_raw.csv") print(test2) test3 = calculate_stats("/kaggle/input/body-sensors/trunk_raw.csv") print(test3)
false
0
1,033
0
1,033
1,033
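calculate_stats above reports two different things: a share of samples (a plain mean over rows) and a share of time (timestamp differences summed per angle band). With uneven sampling the two can diverge noticeably. A toy check of that distinction (synthetic numbers; the real files' timestamp units are not known here, and diff() attributes each interval to the row at its end):

import pandas as pd

df = pd.DataFrame({
    "Timestamp_Android": [0.0, 0.5, 1.0, 1.5, 3.5],  # uneven sampling
    "Elevation_angle": [10, 40, 70, 65, 20],
})
df["Duration"] = df["Timestamp_Android"].diff()

share_samples = (df["Elevation_angle"] > 60).mean() * 100
share_time = df.loc[df["Elevation_angle"] > 60, "Duration"].sum() / df["Duration"].sum() * 100
print(f"> 60 deg: {share_samples:.1f}% of samples, {share_time:.1f}% of recorded time")
# -> 40.0% of samples, but only 28.6% of the recorded time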
129897780
<jupyter_start><jupyter_text>Big Bang Theory All Seasons Dataset ### Context Whoa! Being a huge fan of this series, finally I'm using my skills to analyse the data of related to the series. I am very excited to see how you guys will use this dataset. ### Content The lives of four socially awkward friends, Leonard, Sheldon, Howard and Raj, take a wild turn when they meet the beautiful and free-spirited Penny. This dataset content data of all the seasons, The columns are as follow: * No.overall - Number of Episode Overall * No. inseason - Number of Episode in the specific Season * Title - Title of the Episode * Directed by - Director of the Episode * Written by - Writer of the Episode * Original air date - Original Date when the Episode was Aired * Prod.code - Alphanumeric production code of an episode * U.S. viewers(millions) - Number of US Viewers in Millions * Season - Number of Season Kaggle dataset identifier: big-bang-theory-all-seasons-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session emp = [ ("siva", 34, "banglour", 90), ("Ravi", 32, "chennai", 96), ("kavi", 12, "kolkata", 89), ("jadu", 13, "bombay", 99), ("mahatha", 45, "Delhi", 89), ] df = pd.DataFrame(emp, columns=["name", "age", "city", "marks"]) df df.dtypes df["age"] = df["age"].astype("object") df.dtypes df1 = pd.read_csv("/kaggle/input/big-bang-theory-all-seasons-dataset/dataset.csv") df1.info() df1.columns df1["Original air date"] df1["Original air date"] = pd.to_datetime(df1["Original air date"]) df1.info() df1["Season"] = df1["Season"].astype("object") df1.info()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897780.ipynb
big-bang-theory-all-seasons-dataset
shivavashishtha
[{"Id": 129897780, "ScriptId": 38635493, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11466021, "CreationDate": "05/17/2023 09:29:56", "VersionNumber": 1.0, "Title": "L9 changing the data types", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 48.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186309307, "KernelVersionId": 129897780, "SourceDatasetVersionId": 3261857}]
[{"Id": 3261857, "DatasetId": 1976176, "DatasourceVersionId": 3312258, "CreatorUserId": 4972751, "LicenseName": "CC0: Public Domain", "CreationDate": "03/06/2022 11:56:41", "VersionNumber": 1.0, "Title": "Big Bang Theory All Seasons Dataset", "Slug": "big-bang-theory-all-seasons-dataset", "Subtitle": "Complete Data of Big Bang Theory All Seasons scrapped from Wikipedia", "Description": "### Context\n\nWhoa! Being a huge fan of this series, finally I'm using my skills to analyse the data of related to the series. I am very excited to see how you guys will use this dataset. \n\n### Content\nThe lives of four socially awkward friends, Leonard, Sheldon, Howard and Raj, take a wild turn when they meet the beautiful and free-spirited Penny.\nThis dataset content data of all the seasons, The columns are as follow:\n* No.overall - Number of Episode Overall\n* No. inseason - Number of Episode in the specific Season\n* Title - Title of the Episode\n* Directed by - Director of the Episode\n* Written by - Writer of the Episode\n* Original air date - Original Date when the Episode was Aired\n* Prod.code - Alphanumeric production code of an episode\n* U.S. viewers(millions) - Number of US Viewers in Millions\n* Season - Number of Season\n\n### Acknowledgements\n\nWikipedia - I have used Web Scraping to scrap the data from wikipedia.\n\n### Inspiration\nSharing my work with others and, watching others doing great this with it is always a great inspiration", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1976176, "CreatorUserId": 4972751, "OwnerUserId": 4972751.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3261857.0, "CurrentDatasourceVersionId": 3312258.0, "ForumId": 2000436, "Type": 2, "CreationDate": "03/06/2022 11:56:41", "LastActivityDate": "03/06/2022", "TotalViews": 4771, "TotalDownloads": 345, "TotalVotes": 16, "TotalKernels": 26}]
[{"Id": 4972751, "UserName": "shivavashishtha", "DisplayName": "Shiva Vashishtha", "RegisterDate": "04/27/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session emp = [ ("siva", 34, "banglour", 90), ("Ravi", 32, "chennai", 96), ("kavi", 12, "kolkata", 89), ("jadu", 13, "bombay", 99), ("mahatha", 45, "Delhi", 89), ] df = pd.DataFrame(emp, columns=["name", "age", "city", "marks"]) df df.dtypes df["age"] = df["age"].astype("object") df.dtypes df1 = pd.read_csv("/kaggle/input/big-bang-theory-all-seasons-dataset/dataset.csv") df1.info() df1.columns df1["Original air date"] df1["Original air date"] = pd.to_datetime(df1["Original air date"]) df1.info() df1["Season"] = df1["Season"].astype("object") df1.info()
false
1
423
0
689
423
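Casting Season to "object" works, but pandas has more specific dtypes for columns like these: "category" for a small set of repeated values, and errors="coerce" to keep pd.to_datetime from failing on unparseable air dates. A small sketch on invented rows, not the actual CSV:

import pandas as pd

df = pd.DataFrame({
    "Season": [1, 1, 2, 2],
    "Original air date": ["September 24, 2007", "October 1, 2007", "bad date", None],
})
df["Season"] = df["Season"].astype("category")
df["Original air date"] = pd.to_datetime(df["Original air date"], errors="coerce")
print(df.dtypes)
print(df)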
129897908
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session url = "https://www.news18.com/cricketnext/ipl-auction-2022/mi-players-list-8/" intial = pd.read_html(url) intial[1] gujarat_titans = intial[1] gujarat_titans["Team"] = "Gujarat_Titians" gujarat_titans.rename(columns={"2022 Squad GT": "player"}, inplace=True) gujarat_titans.head(3) csk = intial[2] csk["Team"] = "Chennai Super Kings" csk.rename(columns={"2022 Squad CSK": "player"}, inplace=True) final = gujarat_titans.append(csk, ignore_index=True) final dc = intial[3] dc["Team"] = "Delhi Capitals" dc.rename(columns={"2022 Squad DC": "player"}, inplace=True) final = final.append(dc, ignore_index=True) final kkr = intial[4] kkr["Team"] = "kolkataknight Riders" kkr.rename(columns={"2022 Squad KKR": "player"}, inplace=True) final = final.append(kkr, ignore_index=True) final pbks = intial[5] pbks["Team"] = "Punjab Kings" pbks.rename(columns={"2022 Squad PBKS": "player"}, inplace=True) final = final.append(pbks, ignore_index=True) final lsg = intial[6] lsg["Team"] = "Lucknow Super Gaints" lsg.rename(columns={"2022 Squad LSG": "player"}, inplace=True) final = final.append(lsg, ignore_index=True) final mi = intial[7] mi["Team"] = "Mumbai Indians" mi.rename(columns={"2022 Squad MI": "player"}, inplace=True) final = final.append(mi, ignore_index=True) final rcb = intial[8] rcb["Team"] = "Royal Challengers Banglour" rcb.rename(columns={"2022 Squad RCB": "player"}, inplace=True) final = final.append(rcb, ignore_index=True) final rr = intial[9] rr["Team"] = "Royal Challenges" rr.rename(columns={"2022 Squad RR": "player"}, inplace=True) final = final.append(rr, ignore_index=True) final srh = intial[10] srh["Team"] = "Sun Risisie Hyderbad" srh.rename(columns={"2022 Squad SRH": "player"}, inplace=True) final = final.append(srh, ignore_index=True) final.head(2) final.columns intial[11] text = intial[11] text.rename(columns={"Base Price IN ₹ (CR.)": "Base Price"}, inplace=True) text.rename(columns={"Players": "player"}, inplace=True) text["COST IN ₹ (CR.)"] = np.nan text["Cost IN $ (000)"] = np.nan text.drop("Base Price IN $ (000)", axis=1, inplace=True) text["Team"] = "Unsold" text = text[ [ "player", "Base Price", "TYPE", "COST IN ₹ (CR.)", "Cost IN $ (000)", "2021 Squad", "Team", ] ] final = final.append(text, ignore_index=True) final final.to_csv("ipl-2022-data.csv")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/897/129897908.ipynb
null
null
[{"Id": 129897908, "ScriptId": 37153584, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11466021, "CreationDate": "05/17/2023 09:30:57", "VersionNumber": 1.0, "Title": "L6 CREATING A IPL DATASET", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 110.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session url = "https://www.news18.com/cricketnext/ipl-auction-2022/mi-players-list-8/" intial = pd.read_html(url) intial[1] gujarat_titans = intial[1] gujarat_titans["Team"] = "Gujarat_Titians" gujarat_titans.rename(columns={"2022 Squad GT": "player"}, inplace=True) gujarat_titans.head(3) csk = intial[2] csk["Team"] = "Chennai Super Kings" csk.rename(columns={"2022 Squad CSK": "player"}, inplace=True) final = gujarat_titans.append(csk, ignore_index=True) final dc = intial[3] dc["Team"] = "Delhi Capitals" dc.rename(columns={"2022 Squad DC": "player"}, inplace=True) final = final.append(dc, ignore_index=True) final kkr = intial[4] kkr["Team"] = "kolkataknight Riders" kkr.rename(columns={"2022 Squad KKR": "player"}, inplace=True) final = final.append(kkr, ignore_index=True) final pbks = intial[5] pbks["Team"] = "Punjab Kings" pbks.rename(columns={"2022 Squad PBKS": "player"}, inplace=True) final = final.append(pbks, ignore_index=True) final lsg = intial[6] lsg["Team"] = "Lucknow Super Gaints" lsg.rename(columns={"2022 Squad LSG": "player"}, inplace=True) final = final.append(lsg, ignore_index=True) final mi = intial[7] mi["Team"] = "Mumbai Indians" mi.rename(columns={"2022 Squad MI": "player"}, inplace=True) final = final.append(mi, ignore_index=True) final rcb = intial[8] rcb["Team"] = "Royal Challengers Banglour" rcb.rename(columns={"2022 Squad RCB": "player"}, inplace=True) final = final.append(rcb, ignore_index=True) final rr = intial[9] rr["Team"] = "Royal Challenges" rr.rename(columns={"2022 Squad RR": "player"}, inplace=True) final = final.append(rr, ignore_index=True) final srh = intial[10] srh["Team"] = "Sun Risisie Hyderbad" srh.rename(columns={"2022 Squad SRH": "player"}, inplace=True) final = final.append(srh, ignore_index=True) final.head(2) final.columns intial[11] text = intial[11] text.rename(columns={"Base Price IN ₹ (CR.)": "Base Price"}, inplace=True) text.rename(columns={"Players": "player"}, inplace=True) text["COST IN ₹ (CR.)"] = np.nan text["Cost IN $ (000)"] = np.nan text.drop("Base Price IN $ (000)", axis=1, inplace=True) text["Team"] = "Unsold" text = text[ [ "player", "Base Price", "TYPE", "COST IN ₹ (CR.)", "Cost IN $ (000)", "2021 Squad", "Team", ] ] final = final.append(text, ignore_index=True) final final.to_csv("ipl-2022-data.csv")
false
0
1,118
0
1,118
1,118
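DataFrame.append, used above for every team table, was deprecated in pandas 1.4 and removed in pandas 2.0; collecting the frames in a list and calling pd.concat once is the drop-in replacement and avoids re-copying the accumulated frame at each step. A sketch with invented stand-ins for the scraped tables:

import pandas as pd

team_tables = [
    pd.DataFrame({"player": ["Hardik Pandya"], "Team": ["Gujarat_Titians"]}),
    pd.DataFrame({"player": ["MS Dhoni"], "Team": ["Chennai Super Kings"]}),
    pd.DataFrame({"player": ["Rishabh Pant"], "Team": ["Delhi Capitals"]}),
]
final = pd.concat(team_tables, ignore_index=True)
print(final)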
129559629
<jupyter_start><jupyter_text>CAPTCHA Images ### Context This dataset contains CAPTCHA (Completely Automated Public Turing test to tell Computers and Humans Apart) images. Built in 1997 as way for users to identify and block bots (in order to prevent spam, DDOS etc.). They have since then been replace by reCAPTCHA because they are breakable using Artificial Intelligence (as I encourage you to do). ### Content The images are 5 letter words that can contain numbers. The images have had noise applied to them (blur and a line). They are 200 x 50 PNGs. Kaggle dataset identifier: captcha-version-2-images <jupyter_script># # Travail IA - Machine Learning # ## LAREU MATHIEU - LA2 IG2I - 2023 # L'objectif de ce travail est de réussir à décrypter un captcha en noir et blanc composé de 5 caractères. # On compte un total de 19 caractères différents (2,3,4,5,6,7,8,b,c,d,e,f,g,m,n,p,w,x,y) et l'objectif pour chaque captcha est de réussir à découper chaque caractère puis d'utiliser un FC Classifier pour analyser et prédire les 5 caractères du captcha. # ## Importation des librairies import os import cv2 as cv import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg import seaborn as sns from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers kernel = np.ones((5, 2), np.uint8) kernel2 = np.ones((2, 2), np.uint8) img_folder = "/kaggle/input/captcha-version-2-images/samples/samples/" # ## Affichage d'une image def plot_(img1): plt.figure(figsize=(20, 5)) plt.subplot(1, 2, 1) plt.imshow(img1, "gray") plt.axis("off") image_test = cv.imread(img_folder + "2en7g.png", cv.IMREAD_GRAYSCALE) plot_(image_test) # ## Amélioration de la qualité de l'image pour simplifier la reconnaissance # ### Retrait du fond # On va utiliser la fonction ***AdaptativeThresholding*** de la bibliothèque OpenCV en python pour créer une image binarisée (noir et blanc uniquement). # Les paramètres sont : # * image_test : notre image # * 255 : la valeur maximale de la couleur des pixels blancs dans l'image binarisée # * cv2.ADAPTIVE_THRESH_GAUSSIAN_C = la méthode de calcul du seuil des pixels qui utilise la méthode de la moyenne pondérée gaussienne # * cv2.THRESH_BINARY = le type de seuil à appliquer. Ici on veut que les pixels soient blancs si la valeur est supérieure au seuil, sinon ils seront noirs # * 145 = la taille du voisinage pour calculer le seuil de chaque pixel # * 0 = une constante qui permet d'ajuster finement le seuil en fonction des caractéristiques de l'image thresh = cv.adaptiveThreshold( image_test, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 145, 0 ) plot_(thresh) # ### Amélioration de la qualité de notre image # Nous allons désormais essayer de cacher les lignes qui passent par dessus le code afin que la prediction soit plus simple par la suite. Pour ce faire, nous allons utiliser la fonction ***morphologyEx*** de la bibliothèque OpenCV en Python closing = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel) plot_(closing) dilation = cv.dilate(closing, kernel2, iterations=1) plot_(dilation) blurring = cv2.GaussianBlur(dilation, (1, 1), 0) plot_(blurring)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559629.ipynb
captcha-version-2-images
fournierp
[{"Id": 129559629, "ScriptId": 38401206, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297646, "CreationDate": "05/14/2023 20:44:11", "VersionNumber": 1.0, "Title": "CAPTCHA-LM", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 83.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185742483, "KernelVersionId": 129559629, "SourceDatasetVersionId": 306654}]
[{"Id": 306654, "DatasetId": 38019, "DatasourceVersionId": 319594, "CreatorUserId": 1912216, "LicenseName": "Other (specified in description)", "CreationDate": "02/27/2019 03:04:17", "VersionNumber": 2.0, "Title": "CAPTCHA Images", "Slug": "captcha-version-2-images", "Subtitle": "Version 2 CAPTCHA Images", "Description": "### Context\n\nThis dataset contains CAPTCHA (Completely Automated Public Turing test to tell Computers and Humans Apart) images. Built in 1997 as way for users to identify and block bots (in order to prevent spam, DDOS etc.). They have since then been replace by reCAPTCHA because they are breakable using Artificial Intelligence (as I encourage you to do).\n\n### Content\n\nThe images are 5 letter words that can contain numbers. The images have had noise applied to them (blur and a line). They are 200 x 50 PNGs.\n\n### Acknowledgements\n\nThe dataset comes from [Wilhelmy, Rodrigo & Rosas, Horacio. (2013). captcha dataset.][1] \n[1]: https://www.researchgate.net/publication/248380891_captcha_dataset\n\nThumbnail image from [Accessibility of CAPTCHAs]\n[2]: http://www.bespecular.com/blog/accessibility-of-captchas/\n### Inspiration\n\nThis dataset is a perfect opportunity to attempt to make Optical Character Recognition algorithms.", "VersionNotes": "Updated (correct mislabled image)", "TotalCompressedBytes": 9084225.0, "TotalUncompressedBytes": 9084225.0}]
[{"Id": 38019, "CreatorUserId": 1912216, "OwnerUserId": 1912216.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 306654.0, "CurrentDatasourceVersionId": 319594.0, "ForumId": 46467, "Type": 2, "CreationDate": "07/20/2018 15:28:48", "LastActivityDate": "07/20/2018", "TotalViews": 143419, "TotalDownloads": 13999, "TotalVotes": 304, "TotalKernels": 92}]
[{"Id": 1912216, "UserName": "fournierp", "DisplayName": "Fournierp", "RegisterDate": "05/14/2018", "PerformanceTier": 1}]
# # Travail IA - Machine Learning # ## LAREU MATHIEU - LA2 IG2I - 2023 # L'objectif de ce travail est de réussir à décrypter un captcha en noir et blanc composé de 5 caractères. # On compte un total de 19 caractères différents (2,3,4,5,6,7,8,b,c,d,e,f,g,m,n,p,w,x,y) et l'objectif pour chaque captcha est de réussir à découper chaque caractère puis d'utiliser un FC Classifier pour analyser et prédire les 5 caractères du captcha. # ## Importation des librairies import os import cv2 as cv import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg import seaborn as sns from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers kernel = np.ones((5, 2), np.uint8) kernel2 = np.ones((2, 2), np.uint8) img_folder = "/kaggle/input/captcha-version-2-images/samples/samples/" # ## Affichage d'une image def plot_(img1): plt.figure(figsize=(20, 5)) plt.subplot(1, 2, 1) plt.imshow(img1, "gray") plt.axis("off") image_test = cv.imread(img_folder + "2en7g.png", cv.IMREAD_GRAYSCALE) plot_(image_test) # ## Amélioration de la qualité de l'image pour simplifier la reconnaissance # ### Retrait du fond # On va utiliser la fonction ***AdaptativeThresholding*** de la bibliothèque OpenCV en python pour créer une image binarisée (noir et blanc uniquement). # Les paramètres sont : # * image_test : notre image # * 255 : la valeur maximale de la couleur des pixels blancs dans l'image binarisée # * cv2.ADAPTIVE_THRESH_GAUSSIAN_C = la méthode de calcul du seuil des pixels qui utilise la méthode de la moyenne pondérée gaussienne # * cv2.THRESH_BINARY = le type de seuil à appliquer. Ici on veut que les pixels soient blancs si la valeur est supérieure au seuil, sinon ils seront noirs # * 145 = la taille du voisinage pour calculer le seuil de chaque pixel # * 0 = une constante qui permet d'ajuster finement le seuil en fonction des caractéristiques de l'image thresh = cv.adaptiveThreshold( image_test, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 145, 0 ) plot_(thresh) # ### Amélioration de la qualité de notre image # Nous allons désormais essayer de cacher les lignes qui passent par dessus le code afin que la prediction soit plus simple par la suite. Pour ce faire, nous allons utiliser la fonction ***morphologyEx*** de la bibliothèque OpenCV en Python closing = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel) plot_(closing) dilation = cv.dilate(closing, kernel2, iterations=1) plot_(dilation) blurring = cv2.GaussianBlur(dilation, (1, 1), 0) plot_(blurring)
false
0
893
0
1,051
893
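One detail in the CAPTCHA notebook above: OpenCV is imported as cv, so the final blur has to be cv.GaussianBlur (the name cv2 is never bound in that script). The same thresholding and morphology pipeline, self-contained on a synthetic 200x50 image; the drawn text and line only stand in for a real CAPTCHA sample:

import cv2 as cv
import numpy as np

img = np.full((50, 200), 255, dtype=np.uint8)  # white background
cv.putText(img, "2en7g", (10, 35), cv.FONT_HERSHEY_SIMPLEX, 1, 0, 2)
cv.line(img, (0, 25), (199, 25), 0, 1)  # the noise line typical of these CAPTCHAs

thresh = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                              cv.THRESH_BINARY, 145, 0)
closing = cv.morphologyEx(thresh, cv.MORPH_CLOSE, np.ones((5, 2), np.uint8))
dilation = cv.dilate(closing, np.ones((2, 2), np.uint8), iterations=1)
blurring = cv.GaussianBlur(dilation, (1, 1), 0)
print(blurring.shape, blurring.dtype)  # (50, 200) uint8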
129559372
# Published on May 14, 2023 by Marília Prata, mpwolke import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns import json # Ignore warnings import warnings warnings.filterwarnings("ignore") # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ![](https://brainstation-23.com/wp-content/uploads/2022/06/Kaggle-Participation.jpg)Brain Station 23 df = pd.read_csv( "/kaggle/input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv", delimiter=",", encoding="UTF-8", ) pd.set_option("display.max_columns", None) df.head() # #Missing Values df.isnull().sum() # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution # Define a function to plot a bar plot easily def bar_plot(df, x, x_title, y, title, colors=None, text=None): fig = px.bar( x=x, y=y, text=text, labels={x: x_title.title()}, # replaces default labels by column name data_frame=df, color=colors, barmode="group", template="simple_white", color_discrete_sequence=px.colors.qualitative.Prism, ) texts = [df[col].values for col in y] for i, t in enumerate(texts): fig.data[i].text = t fig.data[i].textposition = "inside" fig["layout"].title = title for trace in fig.data: trace.name = trace.name.replace("_", " ").title() fig.update_yaxes(tickprefix="", showgrid=True) fig.show() # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution # lets define a function to plot a histogram plot easily def hist_plot(df, x, title): fig = px.histogram(x=df[x], color_discrete_sequence=colors, opacity=0.8) fig["layout"].title = title fig.update_yaxes(tickprefix="", showgrid=True) fig.show() # #Word Frequency on Writeup's Titles # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution import plotly.express as px import plotly.graph_objs as go import itertools # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Find words spreading (each word frequency) freq_d = pd.Series(" ".join(df["Title of Writeup"]).split()).value_counts() # Plot the words distribution fig = px.line(freq_d, title="Word frequency on Writeups Titles") fig.update_layout(showlegend=False) # #Replace the Nans, otherwise you'll get that error below (Float found=> Nan) # TypeError: sequence item 105: expected str instance, float found # categorical features with missing values categorical_nan = [ feature for feature in df.columns if df[feature].isna().sum() > 0 and df[feature].dtypes == "O" ] print(categorical_nan) # replacing missing values in categorical features for feature in categorical_nan: df[feature] = df[feature].fillna("None") df[categorical_nan].isna().sum() # #Word frequency on Writeups # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution import plotly.express as px import plotly.graph_objs as go import itertools # Code by Alaa Sedeeq 
https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Find words spreading (each word frequency) freq_d = pd.Series(" ".join(df["Writeup"]).split()).value_counts() # Plot the words distribution fig = px.line(freq_d, title="Word frequency on Writeups") fig.update_layout(showlegend=False) prepared_as_text = [line for line in df["Title of Writeup"]] text_prepared_results = "/n".join(prepared_as_text) text = " ".join(t for t in df["Title of Writeup"]) words_list = text.split() word_freq = {} for word in set(words_list): word_freq[word] = words_list.count(word) # sorting the dictionary word_freq = dict(sorted(word_freq.items(), reverse=True, key=lambda item: item[1])) import itertools # sort the data and put it in a data frame for the visualization word_freq_temp = dict(itertools.islice(word_freq.items(), 25)) word_freq_df = pd.DataFrame( word_freq_temp.items(), columns=["word", "count"] ).sort_values("count", ascending=False) # #Frequent words on Writeups Titles import plotly.express as px # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 bar_plot( word_freq_df.reset_index(), "word", "Words", ["count"], title="20 Frequent words on Writeups Titles", ) import nltk import string from wordcloud import WordCloud nltk.download("stopwords") from nltk.corpus import stopwords stop = stopwords.words("english") from nltk.stem import WordNetLemmatizer from textblob import TextBlob, Word from collections import Counter # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Bigrams from nltk.util import ngrams def get_n_grans_count(text, n_grams, min_freq): output = {} tokens = nltk.word_tokenize(text) # Create the n_gram if n_grams == 2: gs = nltk.bigrams(tokens) elif n_grams == 3: gs = nltk.trigrams(tokens) else: return "Only 2_grams and 3_grams are supported" # compute frequency distribution for all the bigrams in the text fdist = nltk.FreqDist(gs) for k, v in fdist.items(): if v > min_freq: index = " ".join(k) output[index] = v return output # #Bigrams two_grams = get_n_grans_count(text, n_grams=2, min_freq=10) two_grams_df = pd.DataFrame(data=two_grams.items()) two_grams_df = two_grams_df.sort_values(by=1, ascending=False).rename( columns={0: "Two grams", 1: "Count"} ) two_grams_df # #Frequent Bigram on Writeup Titles bar_plot( two_grams_df.iloc[:20], "Two grams", "Two grams", ["Count"], title="Frequent bigram on Writeup Titles", ) # #Trigrams three_grams = get_n_grans_count(text, n_grams=3, min_freq=0) three_grams_df = pd.DataFrame(data=three_grams.items()) three_grams_df = three_grams_df.sort_values(by=1, ascending=False).rename( columns={0: "Three grams", 1: "Count"} ) three_grams_df bar_plot( three_grams_df.iloc[:20], "Three grams", "Three grams", ["Count"], title="Frequent trigrams on Writeups Titles", ) # #Words length words_length = {} for word in set(words_list): words_length[word] = len(word) words_length = dict( sorted(words_length.items(), reverse=True, key=lambda item: item[1]) ) # sort the data and put it in a data frame for the visualization word_length_temp = dict(itertools.islice(words_length.items(), 25)) words_length_df = pd.DataFrame( words_length.items(), columns=["Title of Writeup", "count"] ).sort_values("count", ascending=False) # #Sentence level analysis # Sentence level analysis Text statistics include sentence length distribution, minimum, maximum, and average length. To check the sentence length distribution. 
Code and output are as follows: df["sentence_len"] = df["Title of Writeup"].str.len() print( "Max length : {} \nMin length : {} \nAverage Length : {}".format( max(df["sentence_len"]), min(df["sentence_len"]), df["sentence_len"].mean() ) ) # #The Lnogest Sentece on Writeups Titles # the longest sentence we have df[df["sentence_len"] == max(df["sentence_len"])]["Title of Writeup"].values[0] # #The Shortest sentence: 'models' # the shortest sentence we have df[df["sentence_len"] == min(df["sentence_len"])]["Title of Writeup"].values[0] # #Sentences Lenght Distribution colors = px.colors.qualitative.Prism hist_plot(df, "sentence_len", title="Sentences lenght distribution with spaces") # #Competition's Titles # By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream fig, ax = plt.subplots(figsize=(4, 4)) df["Title of Competition"].value_counts().head(10).sort_values(ascending=True).plot( kind="barh", color="g", ax=ax, title="Kaggle Title of Competitions" ) ax.set_xlabel("Number of Training Examples") plt.show() df["Title of Writeup"].value_counts() # #Titles of Writeups: 1st place Solution # We have a difference between 1st place (lower case: 44 times) and 1st Place (upper case) # By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream fig, ax = plt.subplots(figsize=(4, 4)) df["Title of Writeup"].value_counts().head(10).sort_values(ascending=True).plot( kind="barh", color="r", ax=ax, title="Kaggle Competitions Writeups Rock!" ) ax.set_xlabel("Number of Training Examples") plt.show() # #Checking Writeup Titles # We have Solution, Place,Medal, Silver and even Bronze. Where is the Gold? Not many, just for fews. ##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook from wordcloud import WordCloud from wordcloud import STOPWORDS stopwords = set(STOPWORDS) wordcloud = WordCloud( background_color="black", colormap="Set2", height=2000, width=2000 ).generate(str(df["Title of Writeup"])) plt.rcParams["figure.figsize"] = (12, 12) plt.axis("off") plt.imshow(wordcloud) plt.title("Title of Kaggle Competitions Writeups") plt.show() # #Thank, Organisers, Hosting, Surprised, Congratulation, Fun and Kaggle: Writeups ##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook from wordcloud import WordCloud from wordcloud import STOPWORDS stopwords = set(STOPWORDS) wordcloud = WordCloud( background_color="lightblue", colormap="Set3", height=2000, width=2000 ).generate(str(df["Writeup"])) plt.rcParams["figure.figsize"] = (12, 12) plt.axis("off") plt.imshow(wordcloud) plt.title("Kaggle Competitions Writeups") plt.show() import glob import spacy from spacy import displacy from pathlib import Path from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator # Code by Abu Bakar https://www.kaggle.com/bakar31/writing-evaluation-noob-eda/notebook def get_top_n_words(corpus, n=None, remove_stop_words=False, n_words=1): if remove_stop_words: vec = CountVectorizer(stop_words="english", ngram_range=(n_words, n_words)).fit( corpus ) else: vec = CountVectorizer(ngram_range=(n_words, n_words)).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) return words_freq[:n] # #Working on: Solution sharing by Jiwei Liu 1st Place - 9 
years ago # Competition: Tradeshift Text Classification # https://www.kaggle.com/c/3984/discussion/10901 # https://www.kaggle.com/competitions/tradeshift-text-classification/overview # Only 3 Kaggle Notebooks! import spacy from spacy.lang.ru.examples import sentences nlp = spacy.load("en_core_web_sm") doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) entities = [(i, i.label_, i.label) for i in doc.ents] entities # print(doc.text) # for token in doc: # print(token.text, token.pos_, token.dep_) # #We will make a formal description and code release after some cleaning up. Cheers! # That part of the Solution I understood. displacy.render(doc, style="ent", jupyter=True) # #Tokenization import spacy nlp = spacy.load("en_core_web_sm") doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) for token in doc: print(token.text) # #Part-Of-Speech (POS) Tagging nlp = spacy.load("en_core_web_sm") # Create an nlp object doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) # Iterate over the tokens for token in doc: # Print the token and its part-of-speech tag print(token, token.tag_, token.pos_, spacy.explain(token.tag_)) # #WordCloud - Spacy from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt nlp = spacy.load("en_core_web_sm") # make sure to use larger model! tokens = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) newText = "" for word in tokens: if word.pos_ in ["ADJ", "NOUN"]: newText = " ".join((newText, word.text.lower())) wordcloud = WordCloud( stopwords=STOPWORDS, colormap="Reds", background_color="blue" ).generate(newText) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # #Post-Mortem. Posted Eleven years ago - Most Creative Title among Many, many Solutions # Post-Mortem # By BarrenWuffet (9th Place on that "Raising Money to Fund an Organizational Mission" competition) # Unfortunately, Barren "last seen on Kaggle" was 6 months ago. I hope to see you soon back. # "Anybody do or find anything interesting in this dataset? Or find any good tools for working with large data sets?" # "I found it barely manageable given the size. It also took me awhile to wrap by head around what was in each of the files. Also the database restrictions (1 and 2 but not 3, 1 and 2 and 3, 2 and 3 but not 1, etc) made a really difficult task much harder and considerably less enjoyable. I ended up having to write 12 large files (and accompanying SQL code) for each model to compensate for this." # "I started out with a ~2% sample of the training data in R but even this was rough. I tried using the ff package without much luck. I ended up doing most of the data manipulation using SQL Server 2008 R2 Express and the SSMS which I found to be a bright spot in the whole process as it performed really well given the data size. I especially appreciated the data import manager which helps with wide data sets." # "My best model ended up just being prior averages for prospectid, zip5, and packageid with linear regression. 
I predicted donation amount (not amount2) and response rate using separate models. I'd then do predictedGift^1.15 * predictedResponseRate for a final prediction." # "I tried to use some of the demographic data but had a hard time as I was using zip5 as the key to get state abbreviations as a factor, but some of the zip codes cross state lines which leads to duplicates and zip9, even when indexed, just took too long." # "I think the contest was a cool idea but would have been much better on just one of the 3 databases. Without that restriction I would have had more time to explore the demographic and historical data. The one thing I would have really liked to explore is people's giving before the training period especially as much of the mailings seemed to be political and the test data is sitting within 12 months of a presidential election." # https://www.kaggle.com/c/2863/discussion/2712 mor = df[(df["Title of Writeup"] == "Post-Mortem")].reset_index(drop=True) mor.head()
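get_n_grans_count above relies on nltk.word_tokenize, which needs the Punkt tokenizer models in addition to the stopwords corpus the notebook downloads; without them the bigram counting raises a LookupError. A compact, self-contained version of the same idea on an invented snippet:

import nltk

for pkg in ("punkt", "punkt_tab"):  # punkt_tab exists only on newer NLTK releases
    try:
        nltk.download(pkg, quiet=True)
    except Exception:
        pass

def top_ngrams(text, n=2, min_freq=1):
    tokens = nltk.word_tokenize(text.lower())
    freq = nltk.FreqDist(nltk.ngrams(tokens, n))
    return {" ".join(gram): count for gram, count in freq.items() if count >= min_freq}

sample = "1st place solution. 1st place solution write up. solution overview."
print(top_ngrams(sample, n=2, min_freq=2))  # {'1st place': 2, 'place solution': 2}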
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559372.ipynb
null
null
[{"Id": 129559372, "ScriptId": 38491070, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3012786, "CreationDate": "05/14/2023 20:41:13", "VersionNumber": 2.0, "Title": "Kaggle Competitions still Rock Spacy!", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 537.0, "LinesInsertedFromPrevious": 232.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 305.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
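As a follow-on to the entity printout in the notebook above, the same information is easier to scan as a table. This is a hypothetical extension rather than part of the original notebook; it assumes the en_core_web_sm model is installed, as it is there.

import pandas as pd
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Our winning solution ensembles 14 two-stage xgb models and 7 online models.")

# One row per detected entity, with spaCy's own explanation of the label.
ents = pd.DataFrame(
    [(ent.text, ent.label_, spacy.explain(ent.label_)) for ent in doc.ents],
    columns=["text", "label", "explanation"],
)
print(ents)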
# Published on May 14, 2023 by Marília Prata, mpwolke import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns import json # Ignore warnings import warnings warnings.filterwarnings("ignore") # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ![](https://brainstation-23.com/wp-content/uploads/2022/06/Kaggle-Participation.jpg)Brain Station 23 df = pd.read_csv( "/kaggle/input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv", delimiter=",", encoding="UTF-8", ) pd.set_option("display.max_columns", None) df.head() # #Missing Values df.isnull().sum() # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution # Define a function to plot a bar plot easily def bar_plot(df, x, x_title, y, title, colors=None, text=None): fig = px.bar( x=x, y=y, text=text, labels={x: x_title.title()}, # replaces default labels by column name data_frame=df, color=colors, barmode="group", template="simple_white", color_discrete_sequence=px.colors.qualitative.Prism, ) texts = [df[col].values for col in y] for i, t in enumerate(texts): fig.data[i].text = t fig.data[i].textposition = "inside" fig["layout"].title = title for trace in fig.data: trace.name = trace.name.replace("_", " ").title() fig.update_yaxes(tickprefix="", showgrid=True) fig.show() # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution # lets define a function to plot a histogram plot easily def hist_plot(df, x, title): fig = px.histogram(x=df[x], color_discrete_sequence=colors, opacity=0.8) fig["layout"].title = title fig.update_yaxes(tickprefix="", showgrid=True) fig.show() # #Word Frequency on Writeup's Titles # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution import plotly.express as px import plotly.graph_objs as go import itertools # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Find words spreading (each word frequency) freq_d = pd.Series(" ".join(df["Title of Writeup"]).split()).value_counts() # Plot the words distribution fig = px.line(freq_d, title="Word frequency on Writeups Titles") fig.update_layout(showlegend=False) # #Replace the Nans, otherwise you'll get that error below (Float found=> Nan) # TypeError: sequence item 105: expected str instance, float found # categorical features with missing values categorical_nan = [ feature for feature in df.columns if df[feature].isna().sum() > 0 and df[feature].dtypes == "O" ] print(categorical_nan) # replacing missing values in categorical features for feature in categorical_nan: df[feature] = df[feature].fillna("None") df[categorical_nan].isna().sum() # #Word frequency on Writeups # By Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/execution import plotly.express as px import plotly.graph_objs as go import itertools # Code by Alaa Sedeeq 
https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Find words spreading (each word frequency) freq_d = pd.Series(" ".join(df["Writeup"]).split()).value_counts() # Plot the words distribution fig = px.line(freq_d, title="Word frequency on Writeups") fig.update_layout(showlegend=False) prepared_as_text = [line for line in df["Title of Writeup"]] text_prepared_results = "/n".join(prepared_as_text) text = " ".join(t for t in df["Title of Writeup"]) words_list = text.split() word_freq = {} for word in set(words_list): word_freq[word] = words_list.count(word) # sorting the dictionary word_freq = dict(sorted(word_freq.items(), reverse=True, key=lambda item: item[1])) import itertools # sort the data and put it in a data frame for the visualization word_freq_temp = dict(itertools.islice(word_freq.items(), 25)) word_freq_df = pd.DataFrame( word_freq_temp.items(), columns=["word", "count"] ).sort_values("count", ascending=False) # #Frequent words on Writeups Titles import plotly.express as px # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 bar_plot( word_freq_df.reset_index(), "word", "Words", ["count"], title="20 Frequent words on Writeups Titles", ) import nltk import string from wordcloud import WordCloud nltk.download("stopwords") from nltk.corpus import stopwords stop = stopwords.words("english") from nltk.stem import WordNetLemmatizer from textblob import TextBlob, Word from collections import Counter # Code by Alaa Sedeeq https://www.kaggle.com/alaasedeeq/commonlit-readability-eda/comments#1292516 # Bigrams from nltk.util import ngrams def get_n_grans_count(text, n_grams, min_freq): output = {} tokens = nltk.word_tokenize(text) # Create the n_gram if n_grams == 2: gs = nltk.bigrams(tokens) elif n_grams == 3: gs = nltk.trigrams(tokens) else: return "Only 2_grams and 3_grams are supported" # compute frequency distribution for all the bigrams in the text fdist = nltk.FreqDist(gs) for k, v in fdist.items(): if v > min_freq: index = " ".join(k) output[index] = v return output # #Bigrams two_grams = get_n_grans_count(text, n_grams=2, min_freq=10) two_grams_df = pd.DataFrame(data=two_grams.items()) two_grams_df = two_grams_df.sort_values(by=1, ascending=False).rename( columns={0: "Two grams", 1: "Count"} ) two_grams_df # #Frequent Bigram on Writeup Titles bar_plot( two_grams_df.iloc[:20], "Two grams", "Two grams", ["Count"], title="Frequent bigram on Writeup Titles", ) # #Trigrams three_grams = get_n_grans_count(text, n_grams=3, min_freq=0) three_grams_df = pd.DataFrame(data=three_grams.items()) three_grams_df = three_grams_df.sort_values(by=1, ascending=False).rename( columns={0: "Three grams", 1: "Count"} ) three_grams_df bar_plot( three_grams_df.iloc[:20], "Three grams", "Three grams", ["Count"], title="Frequent trigrams on Writeups Titles", ) # #Words length words_length = {} for word in set(words_list): words_length[word] = len(word) words_length = dict( sorted(words_length.items(), reverse=True, key=lambda item: item[1]) ) # sort the data and put it in a data frame for the visualization word_length_temp = dict(itertools.islice(words_length.items(), 25)) words_length_df = pd.DataFrame( words_length.items(), columns=["Title of Writeup", "count"] ).sort_values("count", ascending=False) # #Sentence level analysis # Sentence level analysis Text statistics include sentence length distribution, minimum, maximum, and average length. To check the sentence length distribution. 
Code and output are as follows: df["sentence_len"] = df["Title of Writeup"].str.len() print( "Max length : {} \nMin length : {} \nAverage Length : {}".format( max(df["sentence_len"]), min(df["sentence_len"]), df["sentence_len"].mean() ) ) # #The Lnogest Sentece on Writeups Titles # the longest sentence we have df[df["sentence_len"] == max(df["sentence_len"])]["Title of Writeup"].values[0] # #The Shortest sentence: 'models' # the shortest sentence we have df[df["sentence_len"] == min(df["sentence_len"])]["Title of Writeup"].values[0] # #Sentences Lenght Distribution colors = px.colors.qualitative.Prism hist_plot(df, "sentence_len", title="Sentences lenght distribution with spaces") # #Competition's Titles # By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream fig, ax = plt.subplots(figsize=(4, 4)) df["Title of Competition"].value_counts().head(10).sort_values(ascending=True).plot( kind="barh", color="g", ax=ax, title="Kaggle Title of Competitions" ) ax.set_xlabel("Number of Training Examples") plt.show() df["Title of Writeup"].value_counts() # #Titles of Writeups: 1st place Solution # We have a difference between 1st place (lower case: 44 times) and 1st Place (upper case) # By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream fig, ax = plt.subplots(figsize=(4, 4)) df["Title of Writeup"].value_counts().head(10).sort_values(ascending=True).plot( kind="barh", color="r", ax=ax, title="Kaggle Competitions Writeups Rock!" ) ax.set_xlabel("Number of Training Examples") plt.show() # #Checking Writeup Titles # We have Solution, Place,Medal, Silver and even Bronze. Where is the Gold? Not many, just for fews. ##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook from wordcloud import WordCloud from wordcloud import STOPWORDS stopwords = set(STOPWORDS) wordcloud = WordCloud( background_color="black", colormap="Set2", height=2000, width=2000 ).generate(str(df["Title of Writeup"])) plt.rcParams["figure.figsize"] = (12, 12) plt.axis("off") plt.imshow(wordcloud) plt.title("Title of Kaggle Competitions Writeups") plt.show() # #Thank, Organisers, Hosting, Surprised, Congratulation, Fun and Kaggle: Writeups ##Code by Taha07 https://www.kaggle.com/taha07/data-scientists-jobs-analysis-visualization/notebook from wordcloud import WordCloud from wordcloud import STOPWORDS stopwords = set(STOPWORDS) wordcloud = WordCloud( background_color="lightblue", colormap="Set3", height=2000, width=2000 ).generate(str(df["Writeup"])) plt.rcParams["figure.figsize"] = (12, 12) plt.axis("off") plt.imshow(wordcloud) plt.title("Kaggle Competitions Writeups") plt.show() import glob import spacy from spacy import displacy from pathlib import Path from sklearn.feature_extraction.text import CountVectorizer from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator # Code by Abu Bakar https://www.kaggle.com/bakar31/writing-evaluation-noob-eda/notebook def get_top_n_words(corpus, n=None, remove_stop_words=False, n_words=1): if remove_stop_words: vec = CountVectorizer(stop_words="english", ngram_range=(n_words, n_words)).fit( corpus ) else: vec = CountVectorizer(ngram_range=(n_words, n_words)).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) return words_freq[:n] # #Working on: Solution sharing by Jiwei Liu 1st Place - 9 
years ago # Competition: Tradeshift Text Classification # https://www.kaggle.com/c/3984/discussion/10901 # https://www.kaggle.com/competitions/tradeshift-text-classification/overview # Only 3 Kaggle Notebooks! import spacy from spacy.lang.ru.examples import sentences nlp = spacy.load("en_core_web_sm") doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) entities = [(i, i.label_, i.label) for i in doc.ents] entities # print(doc.text) # for token in doc: # print(token.text, token.pos_, token.dep_) # #We will make a formal description and code release after some cleaning up. Cheers! # That part of the Solution I understood. displacy.render(doc, style="ent", jupyter=True) # #Tokenization import spacy nlp = spacy.load("en_core_web_sm") doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) for token in doc: print(token.text) # #Part-Of-Speech (POS) Tagging nlp = spacy.load("en_core_web_sm") # Create an nlp object doc = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 
2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) # Iterate over the tokens for token in doc: # Print the token and its part-of-speech tag print(token, token.tag_, token.pos_, spacy.explain(token.tag_)) # #WordCloud - Spacy from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt nlp = spacy.load("en_core_web_sm") # make sure to use larger model! tokens = nlp( """Hi, sorry to disappoint you that there is no magic but brute forcing and many many machine hours. All our work are based on Dmitry and tinrtgu's great benchmarks, and Tianqi Chen's great tool Xgboost. https://github.com/tqchen/xgboost Many many thanks! You are the true heroes! Our winning solution ensembles 14 two-stage xgb models and 7 online models. Our best single xgb model gets 0.0043835/0.0044595 for public and private LB. It is generated as follows: 1) Use the second half training data as base and the first half training data as meta, instead of random split. (this is key!) 2) we use four base classifiers: random forest for numerical features, SGDClassifier for sparse features, online logistic for all features and xgb for all features. 3) For meta classifier, we use xgb with depth 18, 120 trees and 0.09 eta. The xgb models could be memory intensive. We use a 8-core 32 GB memory server for most of our submissions. Thank my boss for the machine :P We will make a formal description and code release after some cleaning up. Cheers!""" ) newText = "" for word in tokens: if word.pos_ in ["ADJ", "NOUN"]: newText = " ".join((newText, word.text.lower())) wordcloud = WordCloud( stopwords=STOPWORDS, colormap="Reds", background_color="blue" ).generate(newText) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # #Post-Mortem. Posted Eleven years ago - Most Creative Title among Many, many Solutions # Post-Mortem # By BarrenWuffet (9th Place on that "Raising Money to Fund an Organizational Mission" competition) # Unfortunately, Barren "last seen on Kaggle" was 6 months ago. I hope to see you soon back. # "Anybody do or find anything interesting in this dataset? Or find any good tools for working with large data sets?" # "I found it barely manageable given the size. It also took me awhile to wrap by head around what was in each of the files. Also the database restrictions (1 and 2 but not 3, 1 and 2 and 3, 2 and 3 but not 1, etc) made a really difficult task much harder and considerably less enjoyable. I ended up having to write 12 large files (and accompanying SQL code) for each model to compensate for this." # "I started out with a ~2% sample of the training data in R but even this was rough. I tried using the ff package without much luck. I ended up doing most of the data manipulation using SQL Server 2008 R2 Express and the SSMS which I found to be a bright spot in the whole process as it performed really well given the data size. I especially appreciated the data import manager which helps with wide data sets." # "My best model ended up just being prior averages for prospectid, zip5, and packageid with linear regression. 
I predicted donation amount (not amount2) and response rate using separate models. I'd then do predictedGift^1.15 * predictedResponseRate for a final prediction." # "I tried to use some of the demographic data but had a hard time as I was using zip5 as the key to get state abbreviations as a factor, but some of the zip codes cross state lines which leads to duplicates and zip9, even when indexed, just took too long." # "I think the contest was a cool idea but would have been much better on just one of the 3 databases. Without that restriction I would have had more time to explore the demographic and historical data. The one thing I would have really liked to explore is people's giving before the training period especially as much of the mailings seemed to be political and the test data is sitting within 12 months of a presidential election." # https://www.kaggle.com/c/2863/discussion/2712 mor = df[(df["Title of Writeup"] == "Post-Mortem")].reset_index(drop=True) mor.head()
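The n-gram counting in the writeup-title analysis above goes through nltk.bigrams/trigrams and FreqDist; as a rough, dependency-free sketch of the same idea (plain whitespace tokenization is an assumption here, not what the notebook uses), the counts can also be built with collections.Counter:

# Minimal sketch of the bigram/trigram counting idea, assuming whitespace
# tokenization instead of nltk.word_tokenize. Zipping shifted token lists is
# the usual library-free way to form n-grams.
from collections import Counter

def ngram_counts(text: str, n: int = 2, min_freq: int = 1) -> dict:
    """Count n-grams in `text`, keeping only those seen more than min_freq times."""
    tokens = text.lower().split()
    grams = zip(*(tokens[i:] for i in range(n)))
    counts = Counter(" ".join(g) for g in grams)
    return {g: c for g, c in counts.items() if c > min_freq}

if __name__ == "__main__":
    sample = "1st place solution 1st place solution thanks to the organisers"
    print(ngram_counts(sample, n=2, min_freq=0))
    # e.g. {'1st place': 2, 'place solution': 2, 'solution 1st': 1, ...}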
false
0
5,811
0
5,811
5,811
129559161
<jupyter_start><jupyter_text>ImageNet 1000 (mini) ### Context https://github.com/pytorch/examples/tree/master/imagenet Kaggle dataset identifier: imagenetmini-1000 <jupyter_script>import os del os.environ["PYTHONPATH"] import sys print("Python version") print(sys.version) print("Version info.") print(sys.version_info) # GPU #!wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin #!mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600 #!wget https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb #!dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb #!apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub #!echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list #!echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list #!apt-get update #!apt-get -y install cuda #!wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb #!apt-get --assume-yes install ./nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb #!apt-get update # GPU # CPU # %%capture # #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetCommon-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetTorch-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl -f https://download.pytorch.org/whl/torch_stable.html #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/Aimet-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl #!python -m site #!ls ~/miniconda3/envs/py38/lib/python3.8/site-packages #! python3 -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])' #! python -m site #! cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/reqs_deb_common.txt | xargs apt-get --assume-yes install # GPU # GPU # If you installed the CUDA 11.x drivers # ln -s /usr/local/cuda-11.0 /usr/local/cuda # OR if you installed the CUDA 10.x drivers # ln -s /usr/local/cuda-10.0 /usr/local/cuda #! echo $PYTHONPATH # pip freeze #!cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/envsetup.sh import os os.environ[ "LD_LIBRARY_PATH" ] = "/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:/usr/local/cuda/targets/x86_64-linux/lib" os.environ["CUDA_TOOLKIT_PATH"] = "/usr/local/cuda" os.environ["CUDNN_INSTALL_PATH"] = "/usr/local/cuda" os.environ["CUDA_HOME"] = "/usr/local/cuda" os.environ["NVIDIA_DRIVER_CAPABILITIES"] = "compute,utility" os.environ["NVIDIA_VISIBLE_DEVICES"] = "all" os.environ[ "PYTHONPATH" ] = "/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages" os.environ["LD_LIBRARY_PATH"] += ":/usr/lib/x86_64-linux-gnu/" #! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common # os.environ['LD_LIBRARY_PATH'] import sys sys.path.append("/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common") #! 
ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common #! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so #! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so #! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common # # Python # update image_net_dataloader """ Creates data-loader for Image-Net dataset source: https://github.com/quic/aimet/blob/develop/Examples/torch/utils/image_net_data_loader.py """ import logging import os from torchvision import transforms from torchvision.datasets.folder import default_loader, has_file_allowed_extension from torch.utils.data import Dataset import torch.utils.data as torch_data from Examples.common import image_net_config logger = logging.getLogger("Dataloader") IMG_EXTENSIONS = ".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif" def make_dataset( directory: str, class_to_idx: dict, extensions: tuple, num_samples_per_class: int ) -> list: """ Creates a dataset of images with num_samples_per_class images in each class :param directory: The string path to the data directory. :param class_to_idx: A dictionary mapping the name of the class to the index (label) :param extensions: list of valid extensions to load data :param num_samples_per_class: Number of samples to use per class. :return: list of images containing the entire dataset. """ images = [] num_classes = 0 directory = os.path.expanduser(directory) for class_name in sorted(class_to_idx.keys()): class_path = os.path.join(directory, class_name) if os.path.isdir(class_path): class_idx = class_to_idx[class_name] class_images = add_images_for_class( class_path, extensions, num_samples_per_class, class_idx ) images.extend(class_images) num_classes += 1 logger.info("Dataset consists of %d images in %d classes", len(images), num_classes) return images def add_images_for_class( class_path: str, extensions: tuple, num_samples_per_class: int, class_idx: int ) -> list: """ For a given class, adds num_samples_per_class images to a list. :param class_path: The string path to the class directory. :param extensions: List of valid extensions to load data :param num_samples_per_class: Number of samples to use per class. :param class_idx: numerical index of class. :return: list of images for given class. """ class_images = [] count = 0 for file_name in os.listdir(class_path): if num_samples_per_class and count >= num_samples_per_class: break if has_file_allowed_extension(file_name, extensions): image_path = os.path.join(class_path, file_name) item = (image_path, class_idx) class_images.append(item) count += 1 return class_images class ImageFolder(Dataset): """ Dataset class inspired by torchvision.datasets.folder.DatasetFolder for images organized as individual files grouped by category. """ def __init__( self, root: str, transform=None, target_transform=None, num_samples_per_class: int = None, ): """ :param root: The path to the data directory. :param transform: The required processing to be applied on the sample. :param target_transform: The required processing to be applied on the target. :param num_samples_per_class: Number of samples to use per class. 
""" Dataset.__init__(self) classes, class_to_idx = self._find_classes(root) self.samples = make_dataset( root, class_to_idx, IMG_EXTENSIONS, num_samples_per_class ) if not self.samples: raise ( RuntimeError( "Found 0 files in sub folders of: {}\nSupported extensions are: {}".format( root, ",".join(IMG_EXTENSIONS) ) ) ) self.root = root self.loader = default_loader self.extensions = IMG_EXTENSIONS self.classes = classes self.class_to_idx = class_to_idx self.targets = [s[1] for s in self.samples] self.transform = transform self.target_transform = target_transform self.imgs = self.samples @staticmethod def _find_classes(directory: str): classes = [ d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d)) ] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx def __getitem__(self, index: int): path, target = self.samples[index] sample = self.loader(path) if self.transform is not None: sample = self.transform(sample) if self.target_transform is not None: target = self.target_transform(target) return sample, target def __len__(self): return len(self.samples) class ImageNetDataLoader: """ For loading Validation data from the ImageNet dataset. """ def __init__( self, images_dir: str, image_size: int, batch_size: int = 128, is_training: bool = False, num_workers: int = 8, num_samples_per_class: int = None, ): """ :param images_dir: The path to the data directory :param image_size: The length of the image :param batch_size: The batch size to use for training and validation :param is_training: Indicates whether to load the training or validation data :param num_workers: Indiicates to the data loader how many sub-processes to use for data loading. :param num_samples_per_class: Number of samples to use per class. """ # For normalization, mean and std dev values are calculated per channel # and can be found on the web. normalize = transforms.Normalize( mean=image_net_config.dataset["images_mean"], std=image_net_config.dataset["images_std"], ) self.train_transforms = transforms.Compose( [ transforms.RandomResizedCrop(image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ] ) self.val_transforms = transforms.Compose( [ transforms.Resize(image_size + 24), transforms.CenterCrop(image_size), transforms.ToTensor(), normalize, ] ) if is_training: data_set = ImageFolder( root=os.path.join(images_dir, "train"), transform=self.train_transforms, num_samples_per_class=num_samples_per_class, ) else: data_set = ImageFolder( root=os.path.join(images_dir, "val"), transform=self.val_transforms, num_samples_per_class=num_samples_per_class, ) self._data_loader = torch_data.DataLoader( data_set, batch_size=batch_size, shuffle=is_training, num_workers=num_workers, ) @property def data_loader(self) -> torch_data.DataLoader: """ Returns the data-loader """ return self._data_loader # adaround script ROOT_DATA_AND_OUTPUTS = "./" N = 16 # number of classes and samples per class BIWIDTH = 4 BIWIDTH_ACTIVATION = 8 # quantization on the input and output of the layer DATASET_FOLDER_PATH = "../input/imagenetmini-1000/imagenet-mini/" # batch size of evaluation is 32 # it is recommended to have ~2000 images # this means we need 62 batches # in the paper they used 2048 images -> 64 batches !!!!!!!!! 
ADAROUND_NUM_BATCHES = 64 ADAROUND_ITERATIONS = 20000 # ADAROUND_NUM_BATCHES = 16 # ADAROUND_ITERATIONS = 10000 # in paper their claim to have accuracy ~68.6% import datetime DATASET_DIR = f"{ROOT_DATA_AND_OUTPUTS}{DATASET_FOLDER_PATH}" output_dir = ( f'{ROOT_DATA_AND_OUTPUTS}output_{datetime.datetime.now().strftime("%Y%m%d_%H%M")}/' ) import os os.makedirs(output_dir, exist_ok=True) import os import torch from Examples.common import image_net_config from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator # from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader from imagenet_dataloader import ImageNetDataLoader class ImageNetDataPipeline: @staticmethod def get_val_dataloader() -> torch.utils.data.DataLoader: """ Instantiates a validation dataloader for ImageNet dataset and returns it """ data_loader = ImageNetDataLoader( DATASET_DIR, image_size=image_net_config.dataset["image_size"], batch_size=image_net_config.evaluation["batch_size"], is_training=False, num_workers=image_net_config.evaluation["num_workers"], num_samples_per_class=N, ).data_loader return data_loader @staticmethod def evaluate(model: torch.nn.Module, use_cuda: bool) -> float: """ Given a torch model, evaluates its Top-1 accuracy on the dataset :param model: the model to evaluate :param use_cuda: whether or not the GPU should be used. """ evaluator = ImageNetEvaluator( DATASET_DIR, image_size=image_net_config.dataset["image_size"], batch_size=image_net_config.evaluation["batch_size"], num_workers=image_net_config.evaluation["num_workers"], num_val_samples_per_class=N, ) return evaluator.evaluate(model, iterations=None, use_cuda=use_cuda) from torchvision.models import resnet18 model = resnet18(pretrained=True) # model preperation from aimet_torch.model_preparer import prepare_model model = prepare_model(model) # move to device use_cuda = False if torch.cuda.is_available(): use_cuda = True model.to(torch.device("cuda")) print("Using cuda: {}".format(use_cuda)) # accuracy of the original model accuracy = ImageNetDataPipeline.evaluate(model, use_cuda) from termcolor import colored print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Original model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) from aimet_torch.batch_norm_fold import fold_all_batch_norms _ = fold_all_batch_norms(model, input_shapes=(1, 3, 224, 224)) from aimet_common.defs import QuantScheme from aimet_torch.quantsim import QuantizationSimModel dummy_input = torch.rand( 1, 3, 224, 224 ) # Shape for each ImageNet sample is (3 channels) x (224 height) x (224 width) if use_cuda: dummy_input = dummy_input.cuda() # https://arxiv.org/pdf/2201.08442.pdf # in the paper they use min-max ----> quant_scheme=QuantScheme.post_training_tf sim = QuantizationSimModel( model=model, # quant_scheme=QuantScheme.post_training_tf_enhanced, quant_scheme=QuantScheme.post_training_tf, dummy_input=dummy_input, default_output_bw=BIWIDTH_ACTIVATION, default_param_bw=BIWIDTH, ) def pass_calibration_data(sim_model, use_cuda): data_loader = ImageNetDataPipeline.get_val_dataloader() batch_size = data_loader.batch_size if use_cuda: device = torch.device("cuda") else: device = torch.device("cpu") sim_model.eval() samples = 1000 batch_cntr = 0 with torch.no_grad(): for input_data, target_data in data_loader: inputs_batch = 
input_data.to(device) sim_model(inputs_batch) batch_cntr += 1 if (batch_cntr * batch_size) > samples: break sim.compute_encodings( forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda ) accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda) print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Simulated quantized model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) # AdaRound from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters data_loader = ImageNetDataPipeline.get_val_dataloader() params = AdaroundParameters( data_loader=data_loader, num_batches=ADAROUND_NUM_BATCHES, default_num_iterations=ADAROUND_ITERATIONS, ) dummy_input = torch.rand(1, 3, 224, 224) if use_cuda: dummy_input = dummy_input.cuda() os.makedirs(f"{output_dir}", exist_ok=True) ada_model = Adaround.apply_adaround( model, dummy_input, params, path=f"{output_dir}", filename_prefix="adaround", default_param_bw=BIWIDTH, # default_quant_scheme=QuantScheme.post_training_tf_enhanced default_quant_scheme=QuantScheme.post_training_tf, ) # model ready to use sim = QuantizationSimModel( model=ada_model, dummy_input=dummy_input, # quant_scheme=QuantScheme.post_training_tf_enhanced, quant_scheme=QuantScheme.post_training_tf, default_output_bw=BIWIDTH_ACTIVATION, default_param_bw=BIWIDTH, ) sim.set_and_freeze_param_encodings(encoding_path=f"{output_dir}adaround.encodings") sim.compute_encodings( forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda ) accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda) print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Quantized (after AdaRound) model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) dummy_input = dummy_input.cpu() sim.export( path=output_dir, filename_prefix="resnet18_after_adaround", dummy_input=dummy_input )
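For orientation on what the 4-bit weight setting above implies, here is a toy numpy illustration of plain min-max (asymmetric) quantization with round-to-nearest — the baseline rounding that AdaRound replaces with a learned, per-weight rounding decision. This is a sketch of the general technique only, not AIMET's internal code path.

# Toy illustration (not AIMET's implementation) of min-max affine quantization,
# the scheme QuantScheme.post_training_tf corresponds to: derive a scale and
# zero-point from the observed range, round to nearest, clip, dequantize.
import numpy as np

def minmax_quantize(w: np.ndarray, n_bits: int = 4) -> np.ndarray:
    qmin, qmax = 0, 2 ** n_bits - 1
    w_min, w_max = float(w.min()), float(w.max())
    scale = (w_max - w_min) / (qmax - qmin)
    scale = scale if scale > 0 else 1.0          # guard against constant tensors
    zero_point = int(round(qmin - w_min / scale))
    q = np.clip(np.round(w / scale) + zero_point, qmin, qmax)
    return (q - zero_point) * scale              # "fake-quantized" weights

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w = rng.normal(size=(64, 64)).astype(np.float32)
    w_q = minmax_quantize(w, n_bits=4)
    print("mean abs rounding error at 4 bit:", np.abs(w - w_q).mean())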
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559161.ipynb
imagenetmini-1000
ifigotin
[{"Id": 129559161, "ScriptId": 38109697, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8749054, "CreationDate": "05/14/2023 20:37:51", "VersionNumber": 14.0, "Title": "aimnet", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 583.0, "LinesInsertedFromPrevious": 210.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 373.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185741617, "KernelVersionId": 129559161, "SourceDatasetVersionId": 998277}]
[{"Id": 998277, "DatasetId": 547506, "DatasourceVersionId": 1026923, "CreatorUserId": 2424380, "LicenseName": "Unknown", "CreationDate": "03/10/2020 01:05:11", "VersionNumber": 1.0, "Title": "ImageNet 1000 (mini)", "Slug": "imagenetmini-1000", "Subtitle": "1000 samples from ImageNet", "Description": "### Context\n\nhttps://github.com/pytorch/examples/tree/master/imagenet\n\n### Acknowledgements\n\nhttps://github.com/pytorch/examples/tree/master/imagenet", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 547506, "CreatorUserId": 2424380, "OwnerUserId": 2424380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 998277.0, "CurrentDatasourceVersionId": 1026923.0, "ForumId": 561077, "Type": 2, "CreationDate": "03/10/2020 01:05:11", "LastActivityDate": "03/10/2020", "TotalViews": 62479, "TotalDownloads": 11891, "TotalVotes": 134, "TotalKernels": 57}]
[{"Id": 2424380, "UserName": "ifigotin", "DisplayName": "Ilya Figotin", "RegisterDate": "10/29/2018", "PerformanceTier": 1}]
import os del os.environ["PYTHONPATH"] import sys print("Python version") print(sys.version) print("Version info.") print(sys.version_info) # GPU #!wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin #!mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600 #!wget https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb #!dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb #!apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub #!echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list #!echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list #!apt-get update #!apt-get -y install cuda #!wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb #!apt-get --assume-yes install ./nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb #!apt-get update # GPU # CPU # %%capture # #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetCommon-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/AimetTorch-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl -f https://download.pytorch.org/whl/torch_stable.html #!python3 -m pip install https://github.com/quic/aimet/releases/download/1.25.0/Aimet-torch_cpu_1.25.0-cp38-cp38-linux_x86_64.whl #!python -m site #!ls ~/miniconda3/envs/py38/lib/python3.8/site-packages #! python3 -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])' #! python -m site #! cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/reqs_deb_common.txt | xargs apt-get --assume-yes install # GPU # GPU # If you installed the CUDA 11.x drivers # ln -s /usr/local/cuda-11.0 /usr/local/cuda # OR if you installed the CUDA 10.x drivers # ln -s /usr/local/cuda-10.0 /usr/local/cuda #! echo $PYTHONPATH # pip freeze #!cat ~/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/bin/envsetup.sh import os os.environ[ "LD_LIBRARY_PATH" ] = "/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:/usr/local/cuda/targets/x86_64-linux/lib" os.environ["CUDA_TOOLKIT_PATH"] = "/usr/local/cuda" os.environ["CUDNN_INSTALL_PATH"] = "/usr/local/cuda" os.environ["CUDA_HOME"] = "/usr/local/cuda" os.environ["NVIDIA_DRIVER_CAPABILITIES"] = "compute,utility" os.environ["NVIDIA_VISIBLE_DEVICES"] = "all" os.environ[ "PYTHONPATH" ] = "/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/x86_64-linux-gnu" os.environ[ "LD_LIBRARY_PATH" ] += ":/root/miniconda3/envs/py38/lib/python3.8/site-packages" os.environ["LD_LIBRARY_PATH"] += ":/usr/lib/x86_64-linux-gnu/" #! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common # os.environ['LD_LIBRARY_PATH'] import sys sys.path.append("/root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common") #! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common #! 
sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so #! sudo mv /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.so /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common/libpymo.cpython-38-x86_64-linux-gnu.so #! ls /root/miniconda3/envs/py38/lib/python3.8/site-packages/aimet_common # # Python # update image_net_dataloader """ Creates data-loader for Image-Net dataset source: https://github.com/quic/aimet/blob/develop/Examples/torch/utils/image_net_data_loader.py """ import logging import os from torchvision import transforms from torchvision.datasets.folder import default_loader, has_file_allowed_extension from torch.utils.data import Dataset import torch.utils.data as torch_data from Examples.common import image_net_config logger = logging.getLogger("Dataloader") IMG_EXTENSIONS = ".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif" def make_dataset( directory: str, class_to_idx: dict, extensions: tuple, num_samples_per_class: int ) -> list: """ Creates a dataset of images with num_samples_per_class images in each class :param directory: The string path to the data directory. :param class_to_idx: A dictionary mapping the name of the class to the index (label) :param extensions: list of valid extensions to load data :param num_samples_per_class: Number of samples to use per class. :return: list of images containing the entire dataset. """ images = [] num_classes = 0 directory = os.path.expanduser(directory) for class_name in sorted(class_to_idx.keys()): class_path = os.path.join(directory, class_name) if os.path.isdir(class_path): class_idx = class_to_idx[class_name] class_images = add_images_for_class( class_path, extensions, num_samples_per_class, class_idx ) images.extend(class_images) num_classes += 1 logger.info("Dataset consists of %d images in %d classes", len(images), num_classes) return images def add_images_for_class( class_path: str, extensions: tuple, num_samples_per_class: int, class_idx: int ) -> list: """ For a given class, adds num_samples_per_class images to a list. :param class_path: The string path to the class directory. :param extensions: List of valid extensions to load data :param num_samples_per_class: Number of samples to use per class. :param class_idx: numerical index of class. :return: list of images for given class. """ class_images = [] count = 0 for file_name in os.listdir(class_path): if num_samples_per_class and count >= num_samples_per_class: break if has_file_allowed_extension(file_name, extensions): image_path = os.path.join(class_path, file_name) item = (image_path, class_idx) class_images.append(item) count += 1 return class_images class ImageFolder(Dataset): """ Dataset class inspired by torchvision.datasets.folder.DatasetFolder for images organized as individual files grouped by category. """ def __init__( self, root: str, transform=None, target_transform=None, num_samples_per_class: int = None, ): """ :param root: The path to the data directory. :param transform: The required processing to be applied on the sample. :param target_transform: The required processing to be applied on the target. :param num_samples_per_class: Number of samples to use per class. 
""" Dataset.__init__(self) classes, class_to_idx = self._find_classes(root) self.samples = make_dataset( root, class_to_idx, IMG_EXTENSIONS, num_samples_per_class ) if not self.samples: raise ( RuntimeError( "Found 0 files in sub folders of: {}\nSupported extensions are: {}".format( root, ",".join(IMG_EXTENSIONS) ) ) ) self.root = root self.loader = default_loader self.extensions = IMG_EXTENSIONS self.classes = classes self.class_to_idx = class_to_idx self.targets = [s[1] for s in self.samples] self.transform = transform self.target_transform = target_transform self.imgs = self.samples @staticmethod def _find_classes(directory: str): classes = [ d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d)) ] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx def __getitem__(self, index: int): path, target = self.samples[index] sample = self.loader(path) if self.transform is not None: sample = self.transform(sample) if self.target_transform is not None: target = self.target_transform(target) return sample, target def __len__(self): return len(self.samples) class ImageNetDataLoader: """ For loading Validation data from the ImageNet dataset. """ def __init__( self, images_dir: str, image_size: int, batch_size: int = 128, is_training: bool = False, num_workers: int = 8, num_samples_per_class: int = None, ): """ :param images_dir: The path to the data directory :param image_size: The length of the image :param batch_size: The batch size to use for training and validation :param is_training: Indicates whether to load the training or validation data :param num_workers: Indiicates to the data loader how many sub-processes to use for data loading. :param num_samples_per_class: Number of samples to use per class. """ # For normalization, mean and std dev values are calculated per channel # and can be found on the web. normalize = transforms.Normalize( mean=image_net_config.dataset["images_mean"], std=image_net_config.dataset["images_std"], ) self.train_transforms = transforms.Compose( [ transforms.RandomResizedCrop(image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ] ) self.val_transforms = transforms.Compose( [ transforms.Resize(image_size + 24), transforms.CenterCrop(image_size), transforms.ToTensor(), normalize, ] ) if is_training: data_set = ImageFolder( root=os.path.join(images_dir, "train"), transform=self.train_transforms, num_samples_per_class=num_samples_per_class, ) else: data_set = ImageFolder( root=os.path.join(images_dir, "val"), transform=self.val_transforms, num_samples_per_class=num_samples_per_class, ) self._data_loader = torch_data.DataLoader( data_set, batch_size=batch_size, shuffle=is_training, num_workers=num_workers, ) @property def data_loader(self) -> torch_data.DataLoader: """ Returns the data-loader """ return self._data_loader # adaround script ROOT_DATA_AND_OUTPUTS = "./" N = 16 # number of classes and samples per class BIWIDTH = 4 BIWIDTH_ACTIVATION = 8 # quantization on the input and output of the layer DATASET_FOLDER_PATH = "../input/imagenetmini-1000/imagenet-mini/" # batch size of evaluation is 32 # it is recommended to have ~2000 images # this means we need 62 batches # in the paper they used 2048 images -> 64 batches !!!!!!!!! 
ADAROUND_NUM_BATCHES = 64 ADAROUND_ITERATIONS = 20000 # ADAROUND_NUM_BATCHES = 16 # ADAROUND_ITERATIONS = 10000 # in paper their claim to have accuracy ~68.6% import datetime DATASET_DIR = f"{ROOT_DATA_AND_OUTPUTS}{DATASET_FOLDER_PATH}" output_dir = ( f'{ROOT_DATA_AND_OUTPUTS}output_{datetime.datetime.now().strftime("%Y%m%d_%H%M")}/' ) import os os.makedirs(output_dir, exist_ok=True) import os import torch from Examples.common import image_net_config from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator # from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader from imagenet_dataloader import ImageNetDataLoader class ImageNetDataPipeline: @staticmethod def get_val_dataloader() -> torch.utils.data.DataLoader: """ Instantiates a validation dataloader for ImageNet dataset and returns it """ data_loader = ImageNetDataLoader( DATASET_DIR, image_size=image_net_config.dataset["image_size"], batch_size=image_net_config.evaluation["batch_size"], is_training=False, num_workers=image_net_config.evaluation["num_workers"], num_samples_per_class=N, ).data_loader return data_loader @staticmethod def evaluate(model: torch.nn.Module, use_cuda: bool) -> float: """ Given a torch model, evaluates its Top-1 accuracy on the dataset :param model: the model to evaluate :param use_cuda: whether or not the GPU should be used. """ evaluator = ImageNetEvaluator( DATASET_DIR, image_size=image_net_config.dataset["image_size"], batch_size=image_net_config.evaluation["batch_size"], num_workers=image_net_config.evaluation["num_workers"], num_val_samples_per_class=N, ) return evaluator.evaluate(model, iterations=None, use_cuda=use_cuda) from torchvision.models import resnet18 model = resnet18(pretrained=True) # model preperation from aimet_torch.model_preparer import prepare_model model = prepare_model(model) # move to device use_cuda = False if torch.cuda.is_available(): use_cuda = True model.to(torch.device("cuda")) print("Using cuda: {}".format(use_cuda)) # accuracy of the original model accuracy = ImageNetDataPipeline.evaluate(model, use_cuda) from termcolor import colored print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Original model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) from aimet_torch.batch_norm_fold import fold_all_batch_norms _ = fold_all_batch_norms(model, input_shapes=(1, 3, 224, 224)) from aimet_common.defs import QuantScheme from aimet_torch.quantsim import QuantizationSimModel dummy_input = torch.rand( 1, 3, 224, 224 ) # Shape for each ImageNet sample is (3 channels) x (224 height) x (224 width) if use_cuda: dummy_input = dummy_input.cuda() # https://arxiv.org/pdf/2201.08442.pdf # in the paper they use min-max ----> quant_scheme=QuantScheme.post_training_tf sim = QuantizationSimModel( model=model, # quant_scheme=QuantScheme.post_training_tf_enhanced, quant_scheme=QuantScheme.post_training_tf, dummy_input=dummy_input, default_output_bw=BIWIDTH_ACTIVATION, default_param_bw=BIWIDTH, ) def pass_calibration_data(sim_model, use_cuda): data_loader = ImageNetDataPipeline.get_val_dataloader() batch_size = data_loader.batch_size if use_cuda: device = torch.device("cuda") else: device = torch.device("cpu") sim_model.eval() samples = 1000 batch_cntr = 0 with torch.no_grad(): for input_data, target_data in data_loader: inputs_batch = 
input_data.to(device) sim_model(inputs_batch) batch_cntr += 1 if (batch_cntr * batch_size) > samples: break sim.compute_encodings( forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda ) accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda) print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Simulated quantized model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) # AdaRound from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters data_loader = ImageNetDataPipeline.get_val_dataloader() params = AdaroundParameters( data_loader=data_loader, num_batches=ADAROUND_NUM_BATCHES, default_num_iterations=ADAROUND_ITERATIONS, ) dummy_input = torch.rand(1, 3, 224, 224) if use_cuda: dummy_input = dummy_input.cuda() os.makedirs(f"{output_dir}", exist_ok=True) ada_model = Adaround.apply_adaround( model, dummy_input, params, path=f"{output_dir}", filename_prefix="adaround", default_param_bw=BIWIDTH, # default_quant_scheme=QuantScheme.post_training_tf_enhanced default_quant_scheme=QuantScheme.post_training_tf, ) # model ready to use sim = QuantizationSimModel( model=ada_model, dummy_input=dummy_input, # quant_scheme=QuantScheme.post_training_tf_enhanced, quant_scheme=QuantScheme.post_training_tf, default_output_bw=BIWIDTH_ACTIVATION, default_param_bw=BIWIDTH, ) sim.set_and_freeze_param_encodings(encoding_path=f"{output_dir}adaround.encodings") sim.compute_encodings( forward_pass_callback=pass_calibration_data, forward_pass_callback_args=use_cuda ) accuracy = ImageNetDataPipeline.evaluate(sim.model, use_cuda) print( colored( "###########################################################################################################", "green", ) ) print(colored(f"Quantized (after AdaRound) model accuracy: {accuracy}", "red")) print( colored( "###########################################################################################################", "green", ) ) dummy_input = dummy_input.cpu() sim.export( path=output_dir, filename_prefix="resnet18_after_adaround", dummy_input=dummy_input )
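The custom ImageFolder/ImageNetDataLoader in the script above mainly adds a num_samples_per_class cap; a rough equivalent calibration loader can be assembled from stock torchvision classes, as sketched below. The ImageNet mean/std values and the one-folder-per-class layout under val/ are assumptions mirroring common practice rather than values read from image_net_config, and Subset here takes the first max_images samples overall, not N per class as the custom loader does.

# Sketch of a small calibration loader built from stock torchvision classes.
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms

def calibration_loader(val_dir: str, image_size: int = 224,
                       batch_size: int = 32, max_images: int = 2048) -> DataLoader:
    tfm = transforms.Compose([
        transforms.Resize(image_size + 24),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        # Usual ImageNet statistics (assumed, not taken from image_net_config).
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    ds = datasets.ImageFolder(val_dir, transform=tfm)
    # First max_images samples only; enough for forward-pass calibration.
    subset = Subset(ds, range(min(max_images, len(ds))))
    return DataLoader(subset, batch_size=batch_size, shuffle=False, num_workers=2)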
false
0
5,363
0
5,416
5,363
129559323
<jupyter_start><jupyter_text>Iris Species The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1]. It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other. The columns in this dataset are: - Id - SepalLengthCm - SepalWidthCm - PetalLengthCm - PetalWidthCm - Species [![Sepal Width vs. Sepal Length](https://www.kaggle.io/svf/138327/e401fb2cc596451b1e4d025aaacda95f/sepalWidthvsLength.png)](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length) [1]: http://archive.ics.uci.edu/ml/ Kaggle dataset identifier: iris <jupyter_script>import pandas as pd import seaborn as sns from matplotlib import pyplot as plt from sklearn.metrics import * from sklearn.model_selection import * from sklearn.impute import KNNImputer from imblearn.under_sampling import RandomUnderSampler from imblearn.over_sampling import SMOTE from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import ( RandomForestClassifier, ExtraTreesClassifier, VotingClassifier, ) from xgboost import XGBClassifier df = pd.read_csv("/kaggle/input/iris/Iris.csv") df.head(20) df.shape df["Species"].value_counts() df.corr() df1 = df.drop(["Id"], axis=1) df1.head() sns.pairplot(df1, hue="Species") sns.boxplot(data=df, x="Species", y="PetalWidthCm") sns.boxplot(data=df, x="Species", y="SepalLengthCm") sns.violinplot(data=df, x="Species", y="SepalWidthCm") sns.histplot(data=df, x="SepalWidthCm", hue="Species", fill=True) sns.kdeplot( data=df, x="SepalWidthCm", y="PetalWidthCm", hue="Species", fill=True, alpha=0.5 ) y = df["Species"] X = df.drop(["Species"], axis=1) model = LogisticRegression(solver="liblinear") model.fit(X, y) y_hat = model.predict(X) accuracy_score(y, y_hat) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = RandomForestClassifier(max_depth=5, n_estimators=500) model.fit(X_train, y_train) y_hat = model.predict(X_test) accuracy_score(y_test, y_hat)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/559/129559323.ipynb
iris
null
[{"Id": 129559323, "ScriptId": 38523170, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297692, "CreationDate": "05/14/2023 20:40:26", "VersionNumber": 1.0, "Title": "notebook3613bce99b", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185741835, "KernelVersionId": 129559323, "SourceDatasetVersionId": 420}]
[{"Id": 420, "DatasetId": 19, "DatasourceVersionId": 420, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2016 07:38:05", "VersionNumber": 2.0, "Title": "Iris Species", "Slug": "iris", "Subtitle": "Classify iris plants into three species in this classic dataset", "Description": "The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].\n\nIt includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.\n\nThe columns in this dataset are:\n\n - Id\n - SepalLengthCm\n - SepalWidthCm\n - PetalLengthCm\n - PetalWidthCm\n - Species\n\n[![Sepal Width vs. Sepal Length](https://www.kaggle.io/svf/138327/e401fb2cc596451b1e4d025aaacda95f/sepalWidthvsLength.png)](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)\n\n\n [1]: http://archive.ics.uci.edu/ml/", "VersionNotes": "Republishing files so they're formally in our system", "TotalCompressedBytes": 15347.0, "TotalUncompressedBytes": 15347.0}]
[{"Id": 19, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 420.0, "CurrentDatasourceVersionId": 420.0, "ForumId": 997, "Type": 2, "CreationDate": "01/12/2016 00:33:31", "LastActivityDate": "02/06/2018", "TotalViews": 1637863, "TotalDownloads": 423540, "TotalVotes": 3416, "TotalKernels": 6420}]
null
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.metrics import *
from sklearn.model_selection import *
from sklearn.impute import KNNImputer
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
    RandomForestClassifier,
    ExtraTreesClassifier,
    VotingClassifier,
)
from xgboost import XGBClassifier

df = pd.read_csv("/kaggle/input/iris/Iris.csv")
df.head(20)
df.shape
df["Species"].value_counts()
df.corr()
df1 = df.drop(["Id"], axis=1)
df1.head()
sns.pairplot(df1, hue="Species")
sns.boxplot(data=df, x="Species", y="PetalWidthCm")
sns.boxplot(data=df, x="Species", y="SepalLengthCm")
sns.violinplot(data=df, x="Species", y="SepalWidthCm")
sns.histplot(data=df, x="SepalWidthCm", hue="Species", fill=True)
sns.kdeplot(
    data=df, x="SepalWidthCm", y="PetalWidthCm", hue="Species", fill=True, alpha=0.5
)
y = df["Species"]
X = df.drop(["Species"], axis=1)
model = LogisticRegression(solver="liblinear")
model.fit(X, y)
y_hat = model.predict(X)
accuracy_score(y, y_hat)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestClassifier(max_depth=5, n_estimators=500)
model.fit(X_train, y_train)
y_hat = model.predict(X_test)
accuracy_score(y_test, y_hat)
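As a hedged follow-up to the single train_test_split above: on 150 rows one split can be noisy, so a stratified k-fold estimate is steadier. Column names assume the same Iris.csv schema; Id is dropped here (unlike the cell above) since it is only a row index.

# Sketch: 5-fold stratified cross-validation of the same random forest setup.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

df = pd.read_csv("/kaggle/input/iris/Iris.csv")
X = df.drop(["Id", "Species"], axis=1)
y = df["Species"]

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
scores = cross_val_score(
    RandomForestClassifier(max_depth=5, n_estimators=500),
    X, y, cv=cv, scoring="accuracy",
)
print(f"5-fold accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")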
false
0
492
0
791
492
129903911
<jupyter_start><jupyter_text>Air Passenger Data for Time Series Analysis ### Context This data is used for making ARIMA model forecasting. ### Content This contains the increasing rate of passenger Kaggle dataset identifier: air-passenger-data-for-time-series-analysis <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Load Dataset df = pd.read_csv( "/kaggle/input/air-passenger-data-for-time-series-analysis/AirPassengers.csv" ) df # # Exploratory Data Analysis (EDA) # ## View Dataset Description df["Month"] = pd.to_datetime(df["Month"], format="%Y-%m") # df['Month'] = df['Month'].dt.strftime('%Y-%m') df.info() df # ## Change Data Index df.set_index("Month", inplace=True) # ## Data Visualization import matplotlib.pyplot as plt import datetime plt.figure(figsize=(12, 6)) plt.plot(df) plt.xlabel("Time") # plt.xticks(rotation=45) plt.ylabel("Num of Passengers") plt.title("US Airline Num of Passengers Trend 1949 - 1960") plt.show() # There is a positive trend with some repetitive pattern # # Time Series Decomposition from statsmodels.tsa.seasonal import seasonal_decompose from dateutil.parser import parse # ## Additive Decomposition additive_dec = seasonal_decompose(df, model="additive", period=30) plt.figure(figsize=(12, 8)) additive_dec.plot() plt.suptitle("Additive Decomposition", fontsize=12) plt.tight_layout() plt.show() multiplicative_dec = seasonal_decompose(df, model="multiplicative", period=30) plt.figure(figsize=(12, 8)) multiplicative_dec.plot() plt.suptitle("Multiplicative Decomposition", fontsize=12) plt.tight_layout() plt.show() # Residual in additive decomposition still have a pattern, while in multiplicative is not really showing and quite random. 
# # Stationary Test for Time Series from statsmodels.tsa.stattools import adfuller, kpss from statsmodels.graphics.tsaplots import plot_acf # ## Augmented Dickey Fuller Test (ADF Test) # H0: time series data is non-stationary # H1: time series data is stationary # p-value < 0.05: reject null hypothesis (H0) result = adfuller(df.values, autolag="AIC") print(f"ADF Statistic: {result[0]}") print(f"p-value: {result[1]}") # ## KPSS Test # H0: time series data is stationary # H1: time series data is non-stationary # p-value < 0.05: reject null hypothesis (H0) result = kpss(df) print("KPSS Statistic:", result[0]) print("p-value:", result[1]) # ## Rolling Test # plt.plot(df['Month'], df['#Passengers']) rolling_mean = df.rolling(6).mean() rolling_std = df.rolling(6).std() plt.plot(df, label="Passenger Data") plt.plot(rolling_mean, color="red", label="Rolling Num of Passenger Mean") plt.plot( rolling_std, color="green", label="Rolling Passenger Number Standard Deviation" ) plt.xlabel("Time") plt.title("Passenger Time Series, Rolling Mean, Standard Deviation") plt.legend(loc="best") plt.show() # From the two test results above, we can see that the current data is non-stationary # # Find ARIMA Order Term from statsmodels.graphics.tsaplots import plot_acf, plot_pacf import matplotlib.pyplot as plt # ## ARIMA d Parameter plt.rcParams.update({"figure.figsize": (12, 8), "figure.dpi": 120}) # Original Series fig, axes = plt.subplots(3, 2, sharex=False) axes[0, 0].plot(df) axes[0, 0].set_title("Original Series") plot_acf(df.values, ax=axes[0, 1]) # 1st Differencing axes[1, 0].plot(df.diff()) axes[1, 0].set_title("1st Order Differencing") plot_acf(df.diff().dropna(), ax=axes[1, 1]) # 2nd Differencing axes[2, 0].plot(df.diff().diff()) axes[2, 0].set_title("2nd Order Differencing") plot_acf(df.diff().diff().dropna(), ax=axes[2, 1]) plt.tight_layout() plt.show() # From the visualization above, we can see that at the 2nd order of differencing the first lag goes well into the negative side, which means the series becomes over-differenced at order 2. So take d = 1 # ### AR Term (p) plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120}) fig, axes = plt.subplots(2, 2, sharex=False) # PACF plot of 1st differenced series axes[0, 0].plot(df.diff()) axes[0, 0].set_title("1st Differencing") axes[0, 1].set(ylim=(0, 5)) plot_pacf(df.diff().dropna(), ax=axes[0, 1]) # PACF plot of 2nd differenced series axes[1, 0].plot(df.diff().diff()) axes[1, 0].set_title("2nd Differencing") axes[1, 1].set(ylim=(0, 5)) plot_pacf(df.diff().diff().dropna(), ax=axes[1, 1]) plt.tight_layout() plt.show() # Here we can see that in the 1st differencing, the first lag is above the significance limit. 
So select p = 1 # ### MA Term (q) plt.rcParams.update({"figure.figsize": (9, 6), "figure.dpi": 120}) fig, axes = plt.subplots(2, 2, sharex=False) # ACF plot of 1st differenced series axes[0, 0].plot(df.diff()) axes[0, 0].set_title("1st Differencing") axes[0, 1].set(ylim=(0, 5)) plot_acf(df.diff().dropna(), ax=axes[0, 1]) # ACF plot of 2nd differenced series axes[1, 0].plot(df.diff().diff()) axes[1, 0].set_title("2nd Differencing") axes[1, 1].set(ylim=(0, 5)) plot_acf(df.diff().diff().dropna(), ax=axes[1, 1]) plt.tight_layout() plt.show() # For MA term, select q = 2 # ## Splitting Data # train = dfs2.loc[:'1959'] # test = dfs2.loc['1960':] # # train.rename({'#Passenger':''}) # # Model Building from statsmodels.tsa.arima.model import ARIMA Arima = ARIMA(df, order=(1, 2, 2)) Ar = Arima.fit() Ar.summary() # Plot residual errors residuals = pd.DataFrame(Ar.resid) fig, ax = plt.subplots(1, 2, figsize=(12, 6)) # plt.figure() residuals.plot(title="Residuals", ax=ax[0]) residuals.plot(kind="kde", title="Density", ax=ax[1]) plt.show() # # Evaluation from statsmodels.graphics.tsaplots import plot_predict def forecast_accuracy(forecast, actual): mape = np.mean(np.abs(forecast - actual) / np.abs(actual)) # MAPE # me = np.mean(forecast - actual) # ME mae = np.mean(np.abs(forecast - actual)) # MAE # mpe = np.mean((forecast - actual)/actual) # MPE rmse = np.mean((forecast - actual) ** 2) ** 0.5 # RMSE corr = np.corrcoef(forecast, actual)[0, 1] # corr # mins = np.amin(np.hstack([forecast[:,None], # actual[:,None]]), axis=1) # maxs = np.amax(np.hstack([forecast[:,None], # actual[:,None]]), axis=1) # minmax = 1 - np.mean(mins/maxs) # minmax # acf1 = acf(fc-test)[1] # ACF1 return {"mape": mape, "mae": mae, "rmse": rmse} # Create Training and Test train = df.loc[:"1955"] test = df.loc["1956":] # Re-train ARIMA model = ARIMA(train, order=(1, 1, 1)) Ar = model.fit() # Forecast fc = Ar.get_forecast(60).summary_frame() # fc_conf_int = fc.conf_int(alpha=0.05) fc = pd.DataFrame(fc).rename({"mean": "#Passengers"}, axis=1) # fc fig, ax = plt.subplots(figsize=(12, 6)) plt.plot(train, label="train time series") plt.plot(test, label="test time series") # plot_predict(Ar, '1956', '1962', ax=ax) plt.plot(fc["#Passengers"]) plt.fill_between( fc.index, fc["mean_ci_lower"], fc["mean_ci_upper"], color="k", alpha=0.15 ) plt.legend(loc="upper left") plt.show() # fc['#Passengers'].values # test # len(test) forecast_accuracy(fc["#Passengers"], test["#Passengers"]) # ## Auto ARIMA import pmdarima as pm model = pm.auto_arima( train.values, start_p=1, start_q=1, test="adf", # use adftest to find optimal 'd' max_p=5, max_q=5, # maximum p and q m=1, # frequency of series d=None, # let model determine 'd' seasonal=False, # No Seasonality start_P=0, D=0, trace=True, error_action="ignore", suppress_warnings=True, stepwise=True, ) print(model.summary()) # Forecast n_periods = 60 fitted, confint = model.predict(n_periods=n_periods, return_conf_int=True) index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS") # make series for plotting purpose fitted_series = pd.Series(fitted, index=index_of_fc) lower_series = pd.Series(confint[:, 0], index=index_of_fc) upper_series = pd.Series(confint[:, 1], index=index_of_fc) # Plot plt.plot(train) plt.plot(fitted_series, color="darkgreen") plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15) plt.title("Auto ARIMA") plt.show() # # Plot # plt.plot(dfs2) # plt.plot(fc, color='darkgreen') # plt.fill_between(lower_series.index, # lower_series, # 
upper_series, # color='k', alpha=.15) # plt.title("SARIMA - Final Forecast of a10 - Drug Sales") # plt.show() # ## SARIMA # Seasonal - fit stepwise auto-ARIMA smodel = pm.auto_arima( train, start_p=1, start_q=1, test="adf", max_p=5, max_q=5, m=12, start_P=0, seasonal=True, d=None, D=1, trace=True, error_action="ignore", suppress_warnings=True, stepwise=True, ) smodel.summary() # Forecast n_periods = 60 fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True) index_of_fc = pd.date_range(train.index[-1], periods=n_periods, freq="MS") # make series for plotting purpose fitted_series = pd.Series(fitted, index=index_of_fc) lower_series = pd.Series(confint[:, 0], index=index_of_fc) upper_series = pd.Series(confint[:, 1], index=index_of_fc) # Plot plt.plot(train) plt.plot(fitted_series, color="darkgreen") plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15) plt.title("Auto ARIMA") plt.show() # Arima = ARIMA(df, order=(0,1,1), seasonal_order=(2,1,[],12)) # Ar = Arima.fit() # Ar.summary() # Forecast fc = Ar.forecast(24, alpha=0.05) # 95% conf fc = pd.DataFrame(fc).rename({"predicted_mean": "#Passengers"}, axis=1) fig, ax = plt.subplots(figsize=(12, 6)) ax = df.plot(ax=ax, label="train time series") ax.set_label("sdf") ax = test.plot(ax=ax, label="test time series") # plot_predict(Ar, '1959', '1961', ax=ax) ax = fc.plot(ax=ax) ax = plt.legend(loc="best") plt.show() print("mape", np.mean(np.abs(fc - test) / np.abs(test))) # MAPE) print("rmse", np.mean((fc - test) ** 2) ** 0.5) # RMSE fig, ax = plt.subplots(figsize=(12, 6)) ax = df.plot(ax=ax, label="train time series") ax.set_label("sdf") # ax = test.plot(ax=ax, label='test time series') # plot_predict(Ar, '1959', '1961', ax=ax) ax = fc.plot(ax=ax) ax = plt.legend(loc="best") plt.show() n_periods = 24 fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True) index_of_fc = pd.date_range(df.index[-1], periods=n_periods, freq="MS") # make series for plotting purpose fitted_series = pd.Series(fitted, index=index_of_fc) lower_series = pd.Series(confint[:, 0], index=index_of_fc) upper_series = pd.Series(confint[:, 1], index=index_of_fc) # Plot plt.plot(df) plt.plot(fitted_series, color="darkgreen") plt.fill_between(lower_series.index, lower_series, upper_series, color="k", alpha=0.15) plt.title("SARIMA - Final Forecast of a10 - Drug Sales") plt.show()
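# Hedged follow-up sketch (an illustrative addition, not the author's code): the seasonal auto_arima model fitted on `train` above can also be scored on the held-out 1956-1960 period with the forecast_accuracy() helper defined earlier, assuming the test frame keeps the original "#Passengers" column name used throughout this notebook.
sarima_fc = smodel.predict(n_periods=len(test))
sarima_fc = pd.Series(np.asarray(sarima_fc), index=test.index)  # align forecasts with the test index
print(forecast_accuracy(sarima_fc.values, test["#Passengers"].values))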
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/903/129903911.ipynb
air-passenger-data-for-time-series-analysis
ashfakyeafi
[{"Id": 129903911, "ScriptId": 38534427, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6654637, "CreationDate": "05/17/2023 10:23:28", "VersionNumber": 5.0, "Title": "Airline Passenger Forecasting using ARIMA", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 393.0, "LinesInsertedFromPrevious": 160.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 233.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186318097, "KernelVersionId": 129903911, "SourceDatasetVersionId": 2504188}]
[{"Id": 2504188, "DatasetId": 1516462, "DatasourceVersionId": 2546888, "CreatorUserId": 5154008, "LicenseName": "CC0: Public Domain", "CreationDate": "08/06/2021 14:46:29", "VersionNumber": 1.0, "Title": "Air Passenger Data for Time Series Analysis", "Slug": "air-passenger-data-for-time-series-analysis", "Subtitle": "There is a list of passenger data from year 1949 to 1960", "Description": "### Context\n\nThis data is used for making ARIMA model forecasting.\n\n\n### Content\n\nThis contains the increasing rate of passenger\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1516462, "CreatorUserId": 5154008, "OwnerUserId": 5154008.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2504188.0, "CurrentDatasourceVersionId": 2546888.0, "ForumId": 1536251, "Type": 2, "CreationDate": "08/06/2021 14:46:29", "LastActivityDate": "08/06/2021", "TotalViews": 11264, "TotalDownloads": 1480, "TotalVotes": 43, "TotalKernels": 9}]
[{"Id": 5154008, "UserName": "ashfakyeafi", "DisplayName": "Ashfak Yeafi", "RegisterDate": "05/24/2020", "PerformanceTier": 3}]
false
1
4,064
0
4,132
4,064
129903554
<jupyter_start><jupyter_text>Intel Image Classification ### Context This is image data of Natural Scenes around the world. ### Content This Data contains around 25k images of size 150x150 distributed under 6 categories. {'buildings' -&gt; 0, 'forest' -&gt; 1, 'glacier' -&gt; 2, 'mountain' -&gt; 3, 'sea' -&gt; 4, 'street' -&gt; 5 } The Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction. This data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge. Kaggle dataset identifier: intel-image-classification <jupyter_script># ## Setup from google.colab import drive drive.mount("/content/drive") import os import cv2 import numpy as np import matplotlib.pyplot as plt from PIL import Image import tensorflow as tf from keras import backend as K from keras.models import load_model from tensorflow.keras.utils import img_to_array from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.preprocessing.image import ImageDataGenerator # ## Paths base_dir = "/content/drive/MyDrive/Image classification/dataset" train_dir = "/content/drive/MyDrive/Image classification/dataset/train" train_street_dir = "/content/drive/MyDrive/Image classification/dataset/train/street" train_sea_dir = "/content/drive/MyDrive/Image classification/dataset/train/sea" train_mountain_dir = ( "/content/drive/MyDrive/Image classification/dataset/train/mountain" ) train_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/train/glacier" train_forest_dir = "/content/drive/MyDrive/Image classification/dataset/train/forest" train_buildings_dir = ( "/content/drive/MyDrive/Image classification/dataset/train/buildings" ) test_dir = "/content/drive/MyDrive/Image classification/dataset/test" test_street_dir = "/content/drive/MyDrive/Image classification/dataset/test/street" test_sea_dir = "/content/drive/MyDrive/Image classification/dataset/test/sea" test_mountain_dir = "/content/drive/MyDrive/Image classification/dataset/test/mountain" test_glacier_dir = "/content/drive/MyDrive/Image classification/dataset/test/glacier" test_forest_dir = "/content/drive/MyDrive/Image classification/dataset/test/forest" test_buildings_dir = ( "/content/drive/MyDrive/Image classification/dataset/test/buildings" ) validation_dir = "/content/drive/MyDrive/Image classification/dataset/validation" validation_street_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/street" ) validation_sea_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/sea" ) validation_mountain_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/mountain" ) validation_glacier_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/glacier" ) validation_forest_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/forest" ) validation_buildings_dir = ( "/content/drive/MyDrive/Image classification/dataset/validation/buildings" ) num_street_train = len(os.listdir(train_street_dir)) num_sea_train = len(os.listdir(train_sea_dir)) num_mountain_train = len(os.listdir(train_mountain_dir)) num_glacier_train = len(os.listdir(train_glacier_dir)) num_forest_train = len(os.listdir(train_forest_dir)) num_buildings_train = len(os.listdir(train_buildings_dir)) total_train = ( num_street_train + num_sea_train + num_mountain_train + num_glacier_train + num_forest_train + 
num_buildings_train ) num_street_test = len(os.listdir(test_street_dir)) num_sea_test = len(os.listdir(test_sea_dir)) num_mountain_test = len(os.listdir(test_mountain_dir)) num_glacier_test = len(os.listdir(test_glacier_dir)) num_forest_test = len(os.listdir(test_forest_dir)) num_buildings_test = len(os.listdir(test_buildings_dir)) total_test = ( num_street_test + num_sea_test + num_mountain_test + num_glacier_test + num_forest_test + num_buildings_test ) num_street_validation = len(os.listdir(validation_street_dir)) num_sea_validation = len(os.listdir(validation_sea_dir)) num_mountain_validation = len(os.listdir(validation_mountain_dir)) num_glacier_validation = len(os.listdir(validation_glacier_dir)) num_forest_validation = len(os.listdir(validation_forest_dir)) num_buildings_validation = len(os.listdir(validation_buildings_dir)) total_validation = ( num_street_validation + num_sea_validation + num_mountain_validation + num_glacier_validation + num_forest_validation + num_buildings_validation ) # ## count print("Number of street images in train:", num_street_train) print("Number of sea images in train:", num_sea_train) print("Number of mountain images in train:", num_mountain_train) print("Number of glacier images in train:", num_glacier_train) print("Number of forest images in train:", num_forest_train) print("Number of buildings images in train:", num_buildings_train) print("Total of images in train:", total_train) print("Number of street images in test:", num_street_test) print("Number of sea images in test:", num_sea_test) print("Number of mountain images in test:", num_mountain_test) print("Number of glacier images in test:", num_glacier_test) print("Number of forest images in test:", num_forest_test) print("Number of buildings images in test:", num_buildings_test) print("Total of images in test:", total_test) print("Number of street images in validation:", num_street_validation) print("Number of sea images in validation:", num_sea_validation) print("Number of mountain images in validation:", num_mountain_validation) print("Number of glacier images in validation:", num_glacier_validation) print("Number of forest images in validation:", num_forest_validation) print("Number of buildings images in validation:", num_buildings_validation) print("Total of images in validation:", total_validation) # ## configuration IMG_SHAPE = 224 batch_size = 32 # ## preprocess data (train, test, validation) image_gen_train = ImageDataGenerator(rescale=1.0 / 255) train_data_gen = image_gen_train.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode="categorical", ) image_generator_validation = ImageDataGenerator(rescale=1.0 / 255) val_data_gen = image_generator_validation.flow_from_directory( batch_size=batch_size, directory=validation_dir, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode="categorical", ) image_gen_test = ImageDataGenerator(rescale=1.0 / 255) test_data_gen = image_gen_test.flow_from_directory( batch_size=batch_size, directory=test_dir, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode="categorical", ) class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if logs.get("val_acc") > 0.85: print("\nReached 85% accuracy so cancelling training!") self.model.stop_training = True # ## download VGG-16 weights # We know VGG-16 is trained with many classes, so if we use (top_layer = True), then we need to retrain it on all classes at which VGG-16 trained, but if we use (top_layer = False), then in 
retraining, we only need to add our training classes. pre_trained_model = tf.keras.applications.VGG16( input_shape=(224, 224, 3), include_top=False, weights="imagenet" ) # Now, we need to freeze the training layers of VGG-16. (because VGG-16, is already trained on huge data). for layer in pre_trained_model.layers: # print(layer.name) layer.trainable = False last_layer = pre_trained_model.get_layer("block5_pool") last_output = last_layer.output x = tf.keras.layers.GlobalMaxPooling2D()(last_output) x = tf.keras.layers.Dense(512, activation="relu")(x) x = tf.keras.layers.Dropout(0.5)(x) x = tf.keras.layers.Dense(6, activation="softmax")(x) # Now, we need to merge the original VGG-16 layers, with our custom layers. model = tf.keras.Model(pre_trained_model.input, x) # for “Multiclass classification”, change the loss with categorical_crossentropy. model.compile( optimizer="adam", loss=tf.keras.losses.categorical_crossentropy, metrics=["acc"] ) model.summary() # ## Train the model print(tf.config.list_physical_devices("GPU")) os.environ["CUDA_VISIBLE_DEVICES"] = "0" callbacks = myCallback() history = model.fit_generator( generator=train_data_gen, validation_data=val_data_gen, steps_per_epoch=(total_train // batch_size), epochs=100, validation_steps=(total_validation // batch_size), verbose=1, callbacks=[callbacks], ) # vgg_classifier = model.fit(train_data_gen, # steps_per_epoch=(total_train//batch_size), epochs = 60, validation_data=val_data_gen, # validation_steps=(total_validation//batch_size),batch_size = batch_size, verbose = 1) # ## Evaluate the model result = model.evaluate(test_data_gen, batch_size=batch_size) print("test_loss, test accuracy", result) print(test_data_gen.class_indices) true_classes = test_data_gen.classes class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"] class_indices = test_data_gen.class_indices class_indices = dict((v, k) for k, v in class_indices.items()) from tensorflow.keras.preprocessing import image # Define the number of images to plot per class num_images = 10 # Loop over the class directories in the test directory for class_dir in os.listdir(test_dir): if class_dir in class_names: # Load the first num_images images from the current class directory images = [] filenames = os.listdir(os.path.join(test_dir, class_dir)) for i in range(num_images): img_path = os.path.join(test_dir, class_dir, filenames[i]) img = image.load_img(img_path, target_size=(224, 224)) images.append(np.array(img)) images = np.array(images) # Get the true labels for the images true_labels = [ class_indices[test_data_gen.class_indices[class_dir]] ] * num_images # Make predictions on the images preds = model.predict(images) pred_labels = [class_indices[np.argmax(pred)] for pred in preds] # Plot the images with their predicted labels fig, axs = plt.subplots(2, 5, figsize=(15, 7)) fig.suptitle(f"{class_dir} Predictions") for i in range(num_images): axs[i // 5, i % 5].imshow(images[i]) axs[i // 5, i % 5].set_title( f"Pred: {pred_labels[i]}\nTrue: {true_labels[i]}" ) axs[i // 5, i % 5].axis("off") plt.show() # ## Save the model # model_json = model.to_json() with open( "/content/drive/MyDrive/Image classification/VGG_Classifier.json", "w" ) as json_file: json_file.write(model_json) model.save("/content/drive/MyDrive/Image classification/VGG_Classifier.h5") print("Saved model to disk") model.save_weights("/content/drive/MyDrive/Image classification/VGG_weights.h5") model = tf.keras.saving.load_model( "/content/drive/MyDrive/Image classification/VGG_Classifier.h5", 
custom_objects=None, compile=True, safe_mode=True, )
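# Hedged follow-up (an illustrative sketch, not part of the original notebook): after training the frozen-base model above, a common next step is to unfreeze only the last VGG-16 block and fine-tune it with a much smaller learning rate. The block5_* layer names come from the standard Keras VGG16 definition; the generators, counts and batch_size below are the ones defined earlier in this notebook.
for layer in pre_trained_model.layers:
    layer.trainable = layer.name.startswith("block5")  # unfreeze only the last convolutional block
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),  # small LR to avoid destroying pretrained features
    loss=tf.keras.losses.categorical_crossentropy,
    metrics=["acc"],
)
history_ft = model.fit(
    train_data_gen,
    validation_data=val_data_gen,
    epochs=5,
    steps_per_epoch=total_train // batch_size,
    validation_steps=total_validation // batch_size,
)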
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/903/129903554.ipynb
intel-image-classification
puneet6060
[{"Id": 129903554, "ScriptId": 38640554, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11821002, "CreationDate": "05/17/2023 10:20:29", "VersionNumber": 1.0, "Title": "VGG16 - Image Classification", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 233.0, "LinesInsertedFromPrevious": 233.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186317636, "KernelVersionId": 129903554, "SourceDatasetVersionId": 269359}]
[{"Id": 269359, "DatasetId": 111880, "DatasourceVersionId": 281586, "CreatorUserId": 2307235, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/30/2019 09:22:58", "VersionNumber": 2.0, "Title": "Intel Image Classification", "Slug": "intel-image-classification", "Subtitle": "Image Scene Classification of Multiclass", "Description": "### Context\n\nThis is image data of Natural Scenes around the world. \n\n### Content\n\nThis Data contains around 25k images of size 150x150 distributed under 6 categories.\n{'buildings' -&gt; 0, \n'forest' -&gt; 1,\n'glacier' -&gt; 2,\n'mountain' -&gt; 3,\n'sea' -&gt; 4,\n'street' -&gt; 5 }\n\nThe Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.\nThis data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.\n\n\n### Acknowledgements\n\nThanks to https://datahack.analyticsvidhya.com for the challenge and Intel for the Data\n\nPhoto by [Jan B\u00f6ttinger on Unsplash][1]\n\n### Inspiration\n\nWant to build powerful Neural network that can classify these images with more accuracy.\n\n\n [1]: https://unsplash.com/photos/27xFENkt-lc", "VersionNotes": "Added Prediction Images", "TotalCompressedBytes": 108365415.0, "TotalUncompressedBytes": 361713334.0}]
[{"Id": 111880, "CreatorUserId": 2307235, "OwnerUserId": 2307235.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 269359.0, "CurrentDatasourceVersionId": 281586.0, "ForumId": 121691, "Type": 2, "CreationDate": "01/29/2019 10:37:42", "LastActivityDate": "01/29/2019", "TotalViews": 441103, "TotalDownloads": 83887, "TotalVotes": 1345, "TotalKernels": 815}]
[{"Id": 2307235, "UserName": "puneet6060", "DisplayName": "Puneet Bansal", "RegisterDate": "10/01/2018", "PerformanceTier": 0}]
false
0
3,218
1
3,419
3,218
129903890
# Amazon Reviews Sentiment - VADER & RoBERTa # Preface # In the bustling world of online shopping, customer reviews have become a powerful voice that shapes our purchasing decisions. Each day, millions of people flock to Amazon, the e-commerce giant, to explore an endless array of products and discover what others have to say about them. It is in this vast ocean of reviews that our project finds its purpose – to uncover the overall sentiment of Amazon customers through the analysis of their invaluable feedback. # With the goal of delving into the minds of consumers, we embarked on a journey through a massive dataset containing a plethora of Amazon reviews. Our mission was to extract insights, uncover patterns, and decipher the underlying sentiment that reverberates within these candid testimonials. We knew that behind each review lay a story, a personal experience, and an opinion waiting to be heard. # As we ventured into this project, we understood that sentiment analysis would be the key to unlocking the collective sentiment hidden within the sea of reviews. Armed with the power of Natural Language Processing and machine learning, we set out to analyze the text, decode emotions, and reveal the sentiment that influenced the customer's overall perception of the products they encountered. # Throughout our exploration, we encountered both challenges and triumphs. We meticulously examined the dataset, taking into account various factors such as ratings, review length, and the inherent positivity or negativity conveyed through the customers' words. We employed advanced techniques, leveraging state-of-the-art models like Roberta and Vader, to discern the sentiment expressed in each review. Our quest was to paint a comprehensive picture of the sentiments prevailing among the Amazon customer community. # Through the rich tapestry of reviews, we uncovered fascinating insights. We witnessed the sheer diversity of sentiments, ranging from exuberant praise to scathing criticism. We noticed the varying degrees of positivity, negativity, and neutrality that shaped the overall sentiment of the customers. We marveled at the power of language and its ability to influence perception and purchase decisions. # This project is an exploration, an ode to the voice of the customers who have left their mark on the digital landscape. It is a testament to the immense value of their opinions and the role they play in shaping the modern consumer landscape. As we present our findings and delve into the world of sentiments, I invite you to join me on this captivating journey through the realm of Amazon reviews. Together, let us unravel the sentiment that lies within the words and experiences of countless customers, and gain a deeper understanding of the sentiments that underpin the Amazon shopping experience. 
# Libraries # basics import pandas as pd import numpy as np # visualisation import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm # regular expression import re import string # NLP toolkit import nltk from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer # VADER from nltk.sentiment import SentimentIntensityAnalyzer # RoBERTa from transformers import AutoModelForSequenceClassification, AutoTokenizer from scipy.special import softmax # other from collections import Counter import warnings warnings.filterwarnings("ignore") # Loading Datasets data1 = pd.read_csv("/kaggle/input/amazon-reviews/data1.csv") data2 = pd.read_csv("/kaggle/input/amazon-reviews/data2.csv") data3 = pd.read_csv("/kaggle/input/amazon-reviews/data3.csv") data1.head(2) data2.head(2) data3.head(2) # Basic Checks print("SHAPES") print("data1", data1.shape) print("======================") print("data2", data2.shape) print("======================") print("data3", data3.shape) # In this project, the aim is to find the sentiments of the customers towards the company based on the reviews provided by them. The project will not include any other form of anaylis based on given data in this project. # So, we will look for only the relevant features from all the three datasets and make a separate combined dataset of all print("COLUMNS:") for data in [data1, data2, data3]: print(data.columns) print("======================================") # extracting common columns: set(data1.columns).intersection(set(data2.columns)).intersection(set(data2.columns)) # To fullfill the score of our project, we just need following columns: # - id # - reviews.date # - reviews.rating # - reviews.text # - reviews.title reviews = pd.DataFrame() for data in tqdm([data1, data2, data3]): df = data[["id", "reviews.date", "reviews.rating", "reviews.text", "reviews.title"]] reviews = pd.concat([reviews, df], ignore_index=True) reviews.columns = ["id", "date", "rating", "review", "title"] reviews.head(3) reviews.info() # - There are total 34929 reviews for products # - Only rating in numeric column # - There are missing values in the dataset reviews.describe() # - The average rating is approximately 4.52, which suggests that, on average, customers are generally positive about the products being reviewed. # - The standard deviation of 0.912454 indicates a moderate degree of variation in the ratings. This means that there is some diversity in customer opinions, with some ratings deviating from the mean. # - The minimum rating is 1, while the maximum rating is 5. This indicates that the dataset includes the full range of possible ratings, allowing for a comprehensive analysis of sentiment. # - Overall, these observations indicate that the dataset predominantly consists of positive reviews, as indicated by the high mean and median ratings. However, the presence of a standard deviation and the range of ratings suggest that there is still some variation in customer sentiment, allowing for a more nuanced analysis of the reviews. reviews.isna().mean() # We would prefer to have maximum reviews so that the model will be well-trained to carry out sentiment analysis. Review column has no missing value. But there are 30 titles missing for reviews. Additionally, around 10% values are missing from data column whereas 12% reviews have no information available for ratings # We would be losing out on data if we drop any rows or column. But for some analysis, missing values might give us trouble. # We can go for a simple solution. 
Create a replica of the dataset and drop all missing values in it no_missing_reviews = reviews.copy() no_missing_reviews.dropna(subset=["rating"], inplace=True) # Thus, all the missing values are handled. # Exploratory Data Analysis plt.hist(x="rating", data=no_missing_reviews) # Since the values are discrete, the histogram acts as a countplot. # - The majority of the reviews (24,116) have a rating of 5. This indicates a strong positive sentiment among customers, as the highest rating is the most prevalent in the dataset. This sounds like good news for the company. # - When considering ratings of 4 and 5 combined, there are a total of 31,208 reviews. This indicates that approximately 90% of the reviews in the dataset are positive, as they fall into the higher rating range. # - Combining ratings of 1, 2, and 3, there are a total of 3,301 reviews that fall into this category. Analyzing these reviews can provide insights into potential areas of improvement or specific issues that customers encountered. # These observations highlight the overall positive sentiment among customers, but also indicate the presence of moderate and negative reviews. Analyzing the content and sentiments of these reviews can provide valuable insights for improving products, addressing customer concerns, and enhancing the overall customer experience. no_missing_reviews.head(3) # number of characters, words and sentences in each review no_missing_reviews["characters"] = no_missing_reviews["review"].apply(len) no_missing_reviews["words"] = no_missing_reviews["review"].apply( lambda x: len(nltk.word_tokenize(x)) ) no_missing_reviews["sentences"] = no_missing_reviews["review"].apply( lambda x: len(nltk.sent_tokenize(x)) ) no_missing_reviews.head(3) no_missing_reviews[["characters", "words", "sentences"]].describe() # **Maximum Lengths:** The maximum values for the 'characters', 'words', and 'sentences' features are quite high, with the longest review having 19,739 characters, 4,017 words, and 125 sentences. These extremely long reviews could potentially contain detailed and extensive feedback or comments. # **Minimum Lengths:** The minimum values for all three features are 1, indicating the presence of extremely short reviews. These reviews might be very concise or could be outliers, possibly lacking substantial information or meaningful content. # **Average Lengths:** The mean values reveal the average length of reviews. On average, the reviews contain approximately 176 characters, 37 words, and 2.65 sentences. These averages can serve as baselines for understanding the typical length of reviews in the dataset. # **High Standard Deviations:** The standard deviations for the 'characters' and 'words' features are relatively high, indicating significant variation in the length of reviews. This suggests a wide range of review lengths, implying that some reviews are much longer or shorter than the average. # review with one character no_missing_reviews[no_missing_reviews["characters"] == 1] # Single character reviews do not convey any meaning. Hence they will not contribute much to our models. So let's drop these rows no_missing_reviews.drop( index=no_missing_reviews[no_missing_reviews["characters"] == 1].index, inplace=True ) no_missing_reviews[no_missing_reviews["characters"] == 2].head() # We can still see there are some non-alphabet characters in the review. 
We will deal during the data cleaning # review with one words no_missing_reviews[no_missing_reviews["words"] == 1].head() # We want to know what are the text differentiator among reviews corresponding to high rating and comparatively lower ratings. We will create two categories, lets say, first group with product having ratings greater than 3 and other group with ratings less than or equal to 3 no_missing_reviews["rating_type"] = no_missing_reviews["rating"].apply( lambda x: "high" if x > 3 else "low" ) # average characters, words, sentences count based on rating type data1 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["characters"].mean()) data2 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["words"].mean()) data3 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["sentences"].mean()) data = pd.concat([data1, data2, data3], axis=1) data fig, ax = plt.subplots(1, 3, figsize=(15, 5)) sns.barplot(x=data.index, y="characters", data=data, ax=ax[0]) sns.barplot(x=data.index, y="words", data=data, ax=ax[1]) sns.barplot(x=data.index, y="sentences", data=data, ax=ax[2]) ax[0].set_title("character count") ax[1].set_title("word count") ax[2].set_title("sentence count") plt.tight_layout() sns.pairplot(no_missing_reviews, hue="rating_type") corr = no_missing_reviews.corr() mask = np.triu(np.ones_like(corr, dtype=bool)) sns.heatmap(corr, mask=mask, annot=True, cmap="flare") # Data Processing # Data preprocessing is a crucial step in natural language processing (NLP) tasks, including sentiment analysis. It involves transforming raw text data into a format that is suitable for analysis and modeling. By applying various preprocessing techniques, we can clean and standardize the textual data, reducing noise and irrelevant information. # In the context of Amazon review sentiment analysis, data preprocessing plays a vital role in improving the accuracy and effectiveness of the analysis. The steps you mentioned are common techniques used in text preprocessing to enhance the quality of the data and extract meaningful features. Let's discuss each step in more detail: # **Remove punctuation:** Punctuation marks like commas, periods, or exclamation marks do not carry significant sentiment information and can be safely removed. This step helps in reducing the dimensionality of the data and simplifying the subsequent analysis. # **Lowercasing:** Converting all the text to lowercase ensures that the analysis treats words with the same spelling but different cases as the same. It helps in avoiding redundant duplicate features and improves the accuracy of downstream tasks such as sentiment analysis. # **Remove stop words:** Stop words are commonly used words that do not contribute much to the overall sentiment or meaning of the text, such as "a," "an," or "the." Removing these words helps to reduce noise and focus on the more important content-bearing words. # **Remove emojis:** Emojis are graphical representations used to express emotions. In sentiment analysis, they can add noise to the text and may not carry direct semantic meaning. Removing emojis helps to simplify the text and improve the interpretability of the sentiment analysis results. # **Tokenization:** Tokenization is the process of breaking down a text into individual tokens, such as words or subwords. It helps in preparing the text for further analysis and feature extraction. # **Stemming:** Stemming involves reducing words to their root or base form. 
For example, converting "running," "runs," and "ran" to the common stem "run." This step helps to normalize the words and reduce the dimensionality of the feature space. # By performing these preprocessing steps, we can clean the Amazon review data, standardize it, and remove noise and irrelevant information. This will provide a cleaner and more representative dataset for sentiment analysis, allowing for more accurate and insightful results. # instantiating PorterSteemer object ps = PorterStemmer() def transform_text(text): # lower casing text = text.lower() # removing html tags pattern = re.compile("<.*?>") text = pattern.sub(r"", text) # removing urls pattern_url = re.compile(r"https?://\S+|www\.\S+") text = pattern_url.sub(r"", text) # removing punctuations for ( char ) in string.punctuation: ###punctuation_marks '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' text = text.replace(char, "") # tokenization text = nltk.word_tokenize(text) # removing stop words new_text = [] for word in text: if word not in stopwords.words("english"): new_text.append(word) # stemming new_text_stem = [] for word in new_text: word = ps.stem(word) new_text_stem.append(word) return " ".join(new_text_stem) reviews["transformed_review"] = reviews["review"].apply(transform_text) reviews.head(3) # sample review example = reviews["transformed_review"][np.random.randint(len(reviews))] example ratings = [] for i, row in reviews.iterrows(): if i in no_missing_reviews.index: type = no_missing_reviews.loc[i, "rating_type"] ratings.append(type) else: ratings.append("NA") reviews["rating_type"] = ratings reviews.head(3) l = reviews[reviews["rating_type"] == "high"]["transformed_review"].tolist() word_corpus_high = [] for sent in l: for word in sent.split(): word_corpus_high.append(word) positves = pd.DataFrame(Counter(word_corpus_high).most_common(20)) plt.figure(figsize=(10, 6)) sns.barplot(x=positves[0], y=positves[1]) plt.xticks(rotation=90) plt.xlabel("words") plt.ylabel("frequency") plt.title("Most frequent words in Positive Reviews") # Words such as 'great', 'love', 'like' appear the most in positive reviews l = reviews[reviews["rating_type"] == "low"]["transformed_review"].tolist() word_corpus_low = [] for sent in l: for word in sent.split(): word_corpus_low.append(word) negatives = pd.DataFrame(Counter(word_corpus_low).most_common(20)) plt.figure(figsize=(10, 6)) sns.barplot(x=negatives[0], y=negatives[1]) plt.xticks(rotation=90) plt.xlabel("words") plt.ylabel("frequency") plt.title("Most frequent words in Negative Reviews") # Negative reviews mostly focus on word 'battery'. It might suggest that the battery product is the area where Amazon need to look at. It attracted most of the negative reviews. # VADER (Valence Aware Dictionary and sEntiment Reasoner) # VADER (Valence Aware Dictionary and sEntiment Reasoner) is a lexicon and rule-based sentiment analysis tool specifically designed for analyzing sentiments expressed in social media text. It is built on a pre-existing list of lexical features that have been labeled with their corresponding sentiment intensities. The VADER sentiment score calculates the sentiment polarity (positive, negative, or neutral) and the sentiment intensity (how strong the sentiment is) of a given text. # **Advantages of VADER Sentiment Score:** # - Built for Social Media Text: VADER is particularly effective for analyzing sentiments in social media text, such as tweets or online reviews. It handles informal language, slangs, and emoticons commonly used in these platforms. 
# - Rule-Based Approach: VADER utilizes a rule-based approach, which makes it more interpretable compared to machine learning-based models. The rules are designed to capture linguistic nuances and sentiment intensity. # - Domain Adaptability: VADER is not limited to specific domains or topics. It can be applied to a wide range of domains and does not require domain-specific training data. # - Handles Negation and Capitalization: VADER is capable of understanding the impact of negations and capitalization on sentiment. It considers the context in which words appear, allowing it to handle phrases like "not good" correctly. # **Disadvantages of VADER Sentiment Score:** # - Lexicon Limitations: The sentiment analysis accuracy of VADER heavily relies on the lexicon it is built upon. While VADER's lexicon is extensive, it may not capture all possible variations or new emerging words, leading to potential inaccuracies. # - Contextual Ambiguity: VADER's rule-based approach might struggle with sentences that contain sarcasm, irony, or other forms of ambiguous contexts. These cases may require a deeper understanding of the context to accurately determine sentiment. # - Lack of Granularity: VADER provides sentiment scores as positive, negative, or neutral, but it does not offer fine-grained sentiment labels. It may not distinguish between subtle nuances of sentiment or provide detailed sentiment analysis. # **Helpfulness in Sentiment Analysis:** # VADER's strengths lie in its ability to handle variety of text, adapt to different domains, and consider contextual factors like negations. Its rule-based approach provides transparency and interpretability. Due to these advantages, VADER is a valuable tool for sentiment analysis tasks in social media monitoring, brand reputation analysis, customer feedback analysis, and other applications where quick sentiment insights are required. However, for more nuanced and complex sentiment analysis tasks, machine learning-based approaches may be more appropriate. # intstantiating SentimentIntensityAnalyzer class sia = SentimentIntensityAnalyzer() reviews = reviews.reset_index() sentiment = {} for index, row in tqdm(reviews.iterrows(), total=len(reviews)): text = row["transformed_review"] id = row["index"] sentiment[id] = sia.polarity_scores(text) vader_scores = pd.DataFrame(sentiment).T.reset_index() vader = reviews.merge(vader_scores, on="index") vader.head() sns.barplot(x="rating_type", y="compound", data=vader) fig, ax = plt.subplots(1, 4, figsize=(20, 5)) sns.barplot(x="rating", y="pos", data=vader, ax=ax[0]) sns.barplot(x="rating", y="neg", data=vader, ax=ax[1]) sns.barplot(x="rating", y="neu", data=vader, ax=ax[2]) sns.barplot(x="rating", y="compound", data=vader, ax=ax[3]) ax[0].set_title("Rating vs Positive Score") ax[1].set_title("Rating vs Negative Score") ax[2].set_title("Rating vs Neutral Score") ax[3].set_title("Rating vs Compound Score") # RoBERTa (Robustly Optimized BERT approach) # RoBERTa (Robustly Optimized BERT approach) is a transformer-based language model that has been pretrained on a large corpus of unlabeled text data. It is similar to the VADER (Valence Aware Dictionary and sEntiment Reasoner) sentiment analysis model in the sense that both are powerful tools used in sentiment analysis tasks. # RoBERTa, based on the BERT (Bidirectional Encoder Representations from Transformers) architecture, excels in understanding the contextual meaning of words and sentences. 
It captures the relationships and dependencies among words, enabling it to generate more accurate sentiment predictions. # **Advantages of RoBERTa Pretrained Model:** # - Contextual Understanding: RoBERTa has been pretrained on a massive amount of text data, which helps it grasp the nuances of language and context. This contextual understanding enables more accurate sentiment analysis by considering the surrounding words and their meanings. # - Fine-tuning Capabilities: The RoBERTa model can be fine-tuned on specific sentiment analysis tasks using labeled data. This allows it to adapt and specialize its predictions for the particular sentiment classification problem at hand. # - Language-Agnostic: RoBERTa is designed to work effectively across multiple languages, making it suitable for sentiment analysis tasks in diverse linguistic contexts. # - State-of-the-Art Performance: RoBERTa has achieved state-of-the-art performance on various natural language processing (NLP) benchmarks and competitions. Its advanced architecture and training methodology contribute to its impressive accuracy and robustness. # **Disadvantages of RoBERTa Pretrained Model:** # - Computational Resources: Training and fine-tuning RoBERTa models can be computationally intensive and may require substantial computational resources, including high-performance GPUs or TPUs. # - Data Dependency: RoBERTa heavily relies on large amounts of labeled data for fine-tuning, and the quality and representativeness of the training data can significantly impact its performance. # - Interpretability: Transformer-based models like RoBERTa are known to be "black-box" models, meaning they provide accurate predictions but lack interpretability. Understanding the specific reasons behind the sentiment predictions made by RoBERTa can be challenging. # RoBERTa, with its strong contextual understanding and ability to capture intricate linguistic patterns, proves to be a valuable tool for sentiment analysis tasks. Its performance, combined with the ability to fine-tune for specific applications, makes it a popular choice in the field of NLP and sentiment analysis. 
# Tasks: # emoji, emotion, hate, irony, offensive, sentiment task = "sentiment" MODEL = f"cardiffnlp/twitter-roberta-base-{task}" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForSequenceClassification.from_pretrained(MODEL) def roberta_scores(example): encoded_text = tokenizer(example, return_tensors="pt") output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) scores_dict = { "roberta_neg": scores[0], "roberta_neu": scores[1], "roberta_pos": scores[2], } return scores_dict sentiments_roberta = {} for ind, row in tqdm(reviews.iterrows(), total=len(reviews)): try: text = row["transformed_review"] id = row["index"] roberta_result = roberta_scores(text) sentiments_roberta[id] = roberta_result except RuntimeError: print(r"Error in row with {} index".format(id)) roberta = pd.DataFrame(sentiments_roberta).T.reset_index() roberta.head() roberta = reviews.merge(roberta, on="index") sentiment_df = roberta.merge( vader[["index", "neg", "neu", "pos", "compound"]], on="index" ) fig, ax = plt.subplots(1, 3, figsize=(20, 5)) sns.barplot(x="rating", y="roberta_pos", data=sentiment_df, ax=ax[0]) sns.barplot(x="rating", y="roberta_neg", data=sentiment_df, ax=ax[1]) sns.barplot(x="rating", y="roberta_neu", data=sentiment_df, ax=ax[2]) ax[0].set_title("Rating vs Roberta Positive Score") ax[1].set_title("Rating vs Roberta Negative Score") ax[2].set_title("Rating vs Roberta Neutral Score") # Comparing RoBERTa and VADER def get_sentiment_scores(review): # roberta encoded_text = tokenizer(review, return_tensors="pt") output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) scores_roberta = {"neg": scores[0], "neu": scores[1], "pos": scores[2]} scores_vader = sia.polarity_scores(review) del scores_vader["compound"] combined = {} combined["roberta"] = scores_roberta combined["vader"] = scores_vader df = pd.DataFrame(combined) # Set the width of each bar bar_width = 0.25 # Calculate the x-coordinates for each dataset x_roberta = np.arange(len(df)) x_vader = x_roberta + bar_width # Create the figure and axis fig, ax = plt.subplots() # Plot the bars for each dataset ax.bar(x_roberta, df["roberta"], width=bar_width, label="Roberta") ax.bar(x_vader, df["vader"], width=bar_width, label="Vader") # Set the x-axis tick labels ax.set_xticks(x_roberta + bar_width / 2) ax.set_xticklabels(df.index) ax.set_title(review) # Add a legend ax.legend() return fig # another sample review example = reviews["transformed_review"][np.random.randint(len(reviews))] example get_sentiment_scores(example) # Based on analysis done on tons of reviews, several differences can be observed. Here is a summary of the comparison: # **Sentiment Differentiation:** The Roberta model shows a better ability to differentiate between positive, negative, and neutral sentiments compared to the Vader model. The Roberta model assigns higher scores to the positive and negative categories, while the Vader model assigns higher scores to the neutral category. # **Valuing Neutrality:** The Vader model tends to value neutrality more for most of the reviews. This is evident from the higher neutral score assigned by the Vader model compared to the Roberta model for the given review. # **Sensitivity to Negativity:** The Roberta model appears to be more sensitive to negative sentiment compared to the Vader model. This is indicated by the higher negative score assigned by the Roberta model for the given review. 
# **Overall Sentiment Polarity:** The Roberta model assigns a higher positive score and a lower neutral score compared to the Vader model for the given review. This suggests that the Roberta model perceives the review as more positive overall, while the Vader model perceives it as more neutral.
# Overall, the comparison highlights that the Roberta model tends to provide more nuanced sentiment analysis by differentiating between positive, negative, and neutral sentiments, while the Vader model leans towards valuing neutrality and may be less sensitive to negativity.
# ## Discrepancies
# ### Case 1: User gave a low rating but the model identifies positive sentiment
odd_ones = sentiment_df[sentiment_df["rating"] == 1].sort_values(
    "roberta_pos", ascending=False
)
odd_ones.head(3)
index = 0
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# The review seems positive, but the user gave it a low rating. Roberta manages to extract the correct sentiment from the review. Notice that Vader says the review is mostly neutral, which is wrong.
index = 1
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This review leans more to the negative side, but both Roberta and Vader say it is either neutral or positive. It could be because of the absence of negative words and the presence of words such as 'working', 'good', 'still'.
index = 2
print(odd_ones["review"].values[index])
get_sentiment_scores(odd_ones["transformed_review"].values[index])
# This one looks fine. The user wrote a positive review but gave a rating of 1. The review conveys a strong positive sentiment, which is correctly captured by Roberta. Vader again seems confused between neutral and positive sentiment.
# ### Case 2: User gave a high rating but the model identifies negative sentiment
odd_ones1 = sentiment_df[sentiment_df["rating"] == 5].sort_values(
    "roberta_neg", ascending=False
)
odd_ones1.head(3)
index = 0
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The review has strong positive sentiment. Both models seem to fail to detect it because of the word 'wrong' being used in a positive context. This is an example of the Roberta model being unable to capture context when there is wordplay. As usual, Vader sticks with neutral sentiment.
index = 1
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# The user wanted to say that the product is great for the Xbox, which is very positive. However, the Roberta model seems to have been influenced by the word 'sucking', which was used to describe the Xbox controller.
index = 2
print(odd_ones1["review"].values[index])
get_sentiment_scores(odd_ones1["transformed_review"].values[index])
# In this instance, the Vader model delivered the correct output. The review sounds like the user does not care much about performance, yet the Roberta model classifies it as negative sentiment. This could be because of words such as 'never' and 'don't'.
# With all of these examples, we can say that, while the Roberta model offers improved sentiment analysis capabilities compared to the Vader model, it is essential to acknowledge its limitations. Here are some limitations of the Roberta model:
# **Language Dependency:** The Roberta model's performance can vary depending on the language it is trained on. If the model is trained on a specific language, it may not generalize well to other languages. Therefore, its effectiveness in sentiment analysis tasks may be limited to the languages it was trained on.
# **Data Bias:** The performance of the Roberta model is influenced by the training data it receives. If the training data contains biases, such as underrepresentation or overrepresentation of certain demographics or perspectives, the model may inherit those biases. This can result in biased sentiment analysis outputs, affecting the reliability and fairness of the model.
# **Contextual Understanding:** While the Roberta model has a strong understanding of contextual language, it may still struggle with certain nuances, sarcasm, or context-specific sentiments. These limitations stem from the complexities of language comprehension, and the model may not always capture the intended sentiment accurately in such cases.
# **Interpretability:** Like many deep learning models, the Roberta model's internal workings and decision-making process are hard to interpret, making it difficult to explain its predictions or identify potential biases or errors.
# While it offers advancements over traditional models like Vader, it is crucial to assess RoBERTa's performance within specific contexts, validate its outputs, and consider potential biases and limitations when interpreting the results.
# Overall Sentiment Analysis of the Dataset
# To calculate the overall sentiment of the dataset based on the Roberta scores, we will:
# - Take the average of the sentiment scores for each sentiment category (negative, neutral, positive) across all the data points.
# - Take the sentiment category with the highest average value as the overall sentiment of the dataset.
roberta.head(2)
new_rob = roberta[["roberta_neg", "roberta_neu", "roberta_pos"]]
# Averaging the sentiment scores and picking the category with the highest mean
overall_sentiment = new_rob.mean(axis=0).idxmax()
print("Overall Sentiment:", overall_sentiment)
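# As a complementary summary (a minimal sketch, not part of the original analysis):
# instead of averaging scores first, label each review by its highest RoBERTa score
# and look at the distribution of those labels. The per-review argmax labelling is
# an assumption made here for illustration.
per_review_label = new_rob.idxmax(axis=1).str.replace("roberta_", "")
print(per_review_label.value_counts(normalize=True))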
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/903/129903890.ipynb
null
null
[{"Id": 129903890, "ScriptId": 38610622, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11739628, "CreationDate": "05/17/2023 10:23:14", "VersionNumber": 1.0, "Title": "Amazon Reviews Sentiment - VADER & RoBERTa", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 607.0, "LinesInsertedFromPrevious": 607.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
# Amazon Reviews Sentiment - VADER & RoBERTa # Preface # In the bustling world of online shopping, customer reviews have become a powerful voice that shapes our purchasing decisions. Each day, millions of people flock to Amazon, the e-commerce giant, to explore an endless array of products and discover what others have to say about them. It is in this vast ocean of reviews that our project finds its purpose – to uncover the overall sentiment of Amazon customers through the analysis of their invaluable feedback. # With the goal of delving into the minds of consumers, we embarked on a journey through a massive dataset containing a plethora of Amazon reviews. Our mission was to extract insights, uncover patterns, and decipher the underlying sentiment that reverberates within these candid testimonials. We knew that behind each review lay a story, a personal experience, and an opinion waiting to be heard. # As we ventured into this project, we understood that sentiment analysis would be the key to unlocking the collective sentiment hidden within the sea of reviews. Armed with the power of Natural Language Processing and machine learning, we set out to analyze the text, decode emotions, and reveal the sentiment that influenced the customer's overall perception of the products they encountered. # Throughout our exploration, we encountered both challenges and triumphs. We meticulously examined the dataset, taking into account various factors such as ratings, review length, and the inherent positivity or negativity conveyed through the customers' words. We employed advanced techniques, leveraging state-of-the-art models like Roberta and Vader, to discern the sentiment expressed in each review. Our quest was to paint a comprehensive picture of the sentiments prevailing among the Amazon customer community. # Through the rich tapestry of reviews, we uncovered fascinating insights. We witnessed the sheer diversity of sentiments, ranging from exuberant praise to scathing criticism. We noticed the varying degrees of positivity, negativity, and neutrality that shaped the overall sentiment of the customers. We marveled at the power of language and its ability to influence perception and purchase decisions. # This project is an exploration, an ode to the voice of the customers who have left their mark on the digital landscape. It is a testament to the immense value of their opinions and the role they play in shaping the modern consumer landscape. As we present our findings and delve into the world of sentiments, I invite you to join me on this captivating journey through the realm of Amazon reviews. Together, let us unravel the sentiment that lies within the words and experiences of countless customers, and gain a deeper understanding of the sentiments that underpin the Amazon shopping experience. 
# Libraries # basics import pandas as pd import numpy as np # visualisation import matplotlib.pyplot as plt import seaborn as sns from tqdm import tqdm # regular expression import re import string # NLP toolkit import nltk from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer # VADER from nltk.sentiment import SentimentIntensityAnalyzer # RoBERTa from transformers import AutoModelForSequenceClassification, AutoTokenizer from scipy.special import softmax # other from collections import Counter import warnings warnings.filterwarnings("ignore") # Loading Datasets data1 = pd.read_csv("/kaggle/input/amazon-reviews/data1.csv") data2 = pd.read_csv("/kaggle/input/amazon-reviews/data2.csv") data3 = pd.read_csv("/kaggle/input/amazon-reviews/data3.csv") data1.head(2) data2.head(2) data3.head(2) # Basic Checks print("SHAPES") print("data1", data1.shape) print("======================") print("data2", data2.shape) print("======================") print("data3", data3.shape) # In this project, the aim is to find the sentiments of the customers towards the company based on the reviews provided by them. The project will not include any other form of anaylis based on given data in this project. # So, we will look for only the relevant features from all the three datasets and make a separate combined dataset of all print("COLUMNS:") for data in [data1, data2, data3]: print(data.columns) print("======================================") # extracting common columns: set(data1.columns).intersection(set(data2.columns)).intersection(set(data2.columns)) # To fullfill the score of our project, we just need following columns: # - id # - reviews.date # - reviews.rating # - reviews.text # - reviews.title reviews = pd.DataFrame() for data in tqdm([data1, data2, data3]): df = data[["id", "reviews.date", "reviews.rating", "reviews.text", "reviews.title"]] reviews = pd.concat([reviews, df], ignore_index=True) reviews.columns = ["id", "date", "rating", "review", "title"] reviews.head(3) reviews.info() # - There are total 34929 reviews for products # - Only rating in numeric column # - There are missing values in the dataset reviews.describe() # - The average rating is approximately 4.52, which suggests that, on average, customers are generally positive about the products being reviewed. # - The standard deviation of 0.912454 indicates a moderate degree of variation in the ratings. This means that there is some diversity in customer opinions, with some ratings deviating from the mean. # - The minimum rating is 1, while the maximum rating is 5. This indicates that the dataset includes the full range of possible ratings, allowing for a comprehensive analysis of sentiment. # - Overall, these observations indicate that the dataset predominantly consists of positive reviews, as indicated by the high mean and median ratings. However, the presence of a standard deviation and the range of ratings suggest that there is still some variation in customer sentiment, allowing for a more nuanced analysis of the reviews. reviews.isna().mean() # We would prefer to have maximum reviews so that the model will be well-trained to carry out sentiment analysis. Review column has no missing value. But there are 30 titles missing for reviews. Additionally, around 10% values are missing from data column whereas 12% reviews have no information available for ratings # We would be losing out on data if we drop any rows or column. But for some analysis, missing values might give us trouble. # We can go for a simple solution. 
Create a relica of dataset and drop all missing values in it no_missing_reviews = reviews.copy() no_missing_reviews.dropna(subset=["rating"], inplace=True) # Thus, all the missing values are handled. # Exploratory Data Analysis plt.hist(x="rating", data=no_missing_reviews) # Since the values are discrete, the histogram acts as countplot. # - The majority of the reviews (24,116) have a rating of 5. This indicates a strong positive sentiment among customers, as the highest rating is the most prevalent in the dataset. This sounds like good news for the company. # - When considering ratings of 4 and 5 combined, there are a total of 31,208 reviews. This indicates that approximately 90% of the reviews in the dataset are positive, as they fall into the higher rating range. # - Combining ratings of 1, 2, and 3, there are a total of 3,301 reviews that fall into this category. Analyzing these reviews can provide insights into potential areas of improvement or specific issues that customers encountered. # These observations highlight the overall positive sentiment among customers, but also indicate the presence of moderate and negative reviews. Analyzing the content and sentiments of these reviews can provide valuable insights for improving products, addressing customer concerns, and enhancing the overall customer experience. no_missing_reviews.head(3) # number of characters, words and sentences in each review no_missing_reviews["characters"] = no_missing_reviews["review"].apply(len) no_missing_reviews["words"] = no_missing_reviews["review"].apply( lambda x: len(nltk.word_tokenize(x)) ) no_missing_reviews["sentences"] = no_missing_reviews["review"].apply( lambda x: len(nltk.sent_tokenize(x)) ) no_missing_reviews.head(3) no_missing_reviews[["characters", "words", "sentences"]].describe() # **Maximum Lengths:** The maximum values for the 'characters', 'words', and 'sentences' features are quite high, with the longest review having 19,739 characters, 4,017 words, and 125 sentences. These extremely long reviews could potentially contain detailed and extensive feedback or comments. # **Minimum Lengths:** The minimum values for all three features are 1, indicating the presence of extremely short reviews. These reviews might be very concise or could be outliers, possibly lacking substantial information or meaningful content. # **Average Lengths:** The mean values reveal the average length of reviews. On average, the reviews contain approximately 176 characters, 37 words, and 2.65 sentences. These averages can serve as baselines for understanding the typical length of reviews in the dataset. # **High Standard Deviations:** The standard deviations for the 'characters' and 'words' features are relatively high, indicating significant variation in the length of reviews. This suggests a wide range of review lengths, implying that some reviews are much longer or shorter than the average.''** # review with one character no_missing_reviews[no_missing_reviews["characters"] == 1] # Single character reviews do not convey any meaning. Hence they will contribute much to out models. So lets drop these rows no_missing_reviews.drop( index=no_missing_reviews[no_missing_reviews["characters"] == 1].index, inplace=True ) no_missing_reviews[no_missing_reviews["characters"] == 2].head() # We can still see there are some non-alphabet characters in the review. 
We will deal during the data cleaning # review with one words no_missing_reviews[no_missing_reviews["words"] == 1].head() # We want to know what are the text differentiator among reviews corresponding to high rating and comparatively lower ratings. We will create two categories, lets say, first group with product having ratings greater than 3 and other group with ratings less than or equal to 3 no_missing_reviews["rating_type"] = no_missing_reviews["rating"].apply( lambda x: "high" if x > 3 else "low" ) # average characters, words, sentences count based on rating type data1 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["characters"].mean()) data2 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["words"].mean()) data3 = pd.DataFrame(no_missing_reviews.groupby("rating_type")["sentences"].mean()) data = pd.concat([data1, data2, data3], axis=1) data fig, ax = plt.subplots(1, 3, figsize=(15, 5)) sns.barplot(x=data.index, y="characters", data=data, ax=ax[0]) sns.barplot(x=data.index, y="words", data=data, ax=ax[1]) sns.barplot(x=data.index, y="sentences", data=data, ax=ax[2]) ax[0].set_title("character count") ax[1].set_title("word count") ax[2].set_title("sentence count") plt.tight_layout() sns.pairplot(no_missing_reviews, hue="rating_type") corr = no_missing_reviews.corr() mask = np.triu(np.ones_like(corr, dtype=bool)) sns.heatmap(corr, mask=mask, annot=True, cmap="flare") # Data Processing # Data preprocessing is a crucial step in natural language processing (NLP) tasks, including sentiment analysis. It involves transforming raw text data into a format that is suitable for analysis and modeling. By applying various preprocessing techniques, we can clean and standardize the textual data, reducing noise and irrelevant information. # In the context of Amazon review sentiment analysis, data preprocessing plays a vital role in improving the accuracy and effectiveness of the analysis. The steps you mentioned are common techniques used in text preprocessing to enhance the quality of the data and extract meaningful features. Let's discuss each step in more detail: # **Remove punctuation:** Punctuation marks like commas, periods, or exclamation marks do not carry significant sentiment information and can be safely removed. This step helps in reducing the dimensionality of the data and simplifying the subsequent analysis. # **Lowercasing:** Converting all the text to lowercase ensures that the analysis treats words with the same spelling but different cases as the same. It helps in avoiding redundant duplicate features and improves the accuracy of downstream tasks such as sentiment analysis. # **Remove stop words:** Stop words are commonly used words that do not contribute much to the overall sentiment or meaning of the text, such as "a," "an," or "the." Removing these words helps to reduce noise and focus on the more important content-bearing words. # **Remove emojis:** Emojis are graphical representations used to express emotions. In sentiment analysis, they can add noise to the text and may not carry direct semantic meaning. Removing emojis helps to simplify the text and improve the interpretability of the sentiment analysis results. # **Tokenization:** Tokenization is the process of breaking down a text into individual tokens, such as words or subwords. It helps in preparing the text for further analysis and feature extraction. # **Stemming:** Stemming involves reducing words to their root or base form. 
For example, converting "running," "runs," and "ran" to the common stem "run." This step helps to normalize the words and reduce the dimensionality of the feature space. # By performing these preprocessing steps, we can clean the Amazon review data, standardize it, and remove noise and irrelevant information. This will provide a cleaner and more representative dataset for sentiment analysis, allowing for more accurate and insightful results. # instantiating PorterSteemer object ps = PorterStemmer() def transform_text(text): # lower casing text = text.lower() # removing html tags pattern = re.compile("<.*?>") text = pattern.sub(r"", text) # removing urls pattern_url = re.compile(r"https?://\S+|www\.\S+") text = pattern_url.sub(r"", text) # removing punctuations for ( char ) in string.punctuation: ###punctuation_marks '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' text = text.replace(char, "") # tokenization text = nltk.word_tokenize(text) # removing stop words new_text = [] for word in text: if word not in stopwords.words("english"): new_text.append(word) # stemming new_text_stem = [] for word in new_text: word = ps.stem(word) new_text_stem.append(word) return " ".join(new_text_stem) reviews["transformed_review"] = reviews["review"].apply(transform_text) reviews.head(3) # sample review example = reviews["transformed_review"][np.random.randint(len(reviews))] example ratings = [] for i, row in reviews.iterrows(): if i in no_missing_reviews.index: type = no_missing_reviews.loc[i, "rating_type"] ratings.append(type) else: ratings.append("NA") reviews["rating_type"] = ratings reviews.head(3) l = reviews[reviews["rating_type"] == "high"]["transformed_review"].tolist() word_corpus_high = [] for sent in l: for word in sent.split(): word_corpus_high.append(word) positves = pd.DataFrame(Counter(word_corpus_high).most_common(20)) plt.figure(figsize=(10, 6)) sns.barplot(x=positves[0], y=positves[1]) plt.xticks(rotation=90) plt.xlabel("words") plt.ylabel("frequency") plt.title("Most frequent words in Positive Reviews") # Words such as 'great', 'love', 'like' appear the most in positive reviews l = reviews[reviews["rating_type"] == "low"]["transformed_review"].tolist() word_corpus_low = [] for sent in l: for word in sent.split(): word_corpus_low.append(word) negatives = pd.DataFrame(Counter(word_corpus_low).most_common(20)) plt.figure(figsize=(10, 6)) sns.barplot(x=negatives[0], y=negatives[1]) plt.xticks(rotation=90) plt.xlabel("words") plt.ylabel("frequency") plt.title("Most frequent words in Negative Reviews") # Negative reviews mostly focus on word 'battery'. It might suggest that the battery product is the area where Amazon need to look at. It attracted most of the negative reviews. # VADER (Valence Aware Dictionary and sEntiment Reasoner) # VADER (Valence Aware Dictionary and sEntiment Reasoner) is a lexicon and rule-based sentiment analysis tool specifically designed for analyzing sentiments expressed in social media text. It is built on a pre-existing list of lexical features that have been labeled with their corresponding sentiment intensities. The VADER sentiment score calculates the sentiment polarity (positive, negative, or neutral) and the sentiment intensity (how strong the sentiment is) of a given text. # **Advantages of VADER Sentiment Score:** # - Built for Social Media Text: VADER is particularly effective for analyzing sentiments in social media text, such as tweets or online reviews. It handles informal language, slangs, and emoticons commonly used in these platforms. 
# - Rule-Based Approach: VADER utilizes a rule-based approach, which makes it more interpretable compared to machine learning-based models. The rules are designed to capture linguistic nuances and sentiment intensity. # - Domain Adaptability: VADER is not limited to specific domains or topics. It can be applied to a wide range of domains and does not require domain-specific training data. # - Handles Negation and Capitalization: VADER is capable of understanding the impact of negations and capitalization on sentiment. It considers the context in which words appear, allowing it to handle phrases like "not good" correctly. # **Disadvantages of VADER Sentiment Score:** # - Lexicon Limitations: The sentiment analysis accuracy of VADER heavily relies on the lexicon it is built upon. While VADER's lexicon is extensive, it may not capture all possible variations or new emerging words, leading to potential inaccuracies. # - Contextual Ambiguity: VADER's rule-based approach might struggle with sentences that contain sarcasm, irony, or other forms of ambiguous contexts. These cases may require a deeper understanding of the context to accurately determine sentiment. # - Lack of Granularity: VADER provides sentiment scores as positive, negative, or neutral, but it does not offer fine-grained sentiment labels. It may not distinguish between subtle nuances of sentiment or provide detailed sentiment analysis. # **Helpfulness in Sentiment Analysis:** # VADER's strengths lie in its ability to handle variety of text, adapt to different domains, and consider contextual factors like negations. Its rule-based approach provides transparency and interpretability. Due to these advantages, VADER is a valuable tool for sentiment analysis tasks in social media monitoring, brand reputation analysis, customer feedback analysis, and other applications where quick sentiment insights are required. However, for more nuanced and complex sentiment analysis tasks, machine learning-based approaches may be more appropriate. # intstantiating SentimentIntensityAnalyzer class sia = SentimentIntensityAnalyzer() reviews = reviews.reset_index() sentiment = {} for index, row in tqdm(reviews.iterrows(), total=len(reviews)): text = row["transformed_review"] id = row["index"] sentiment[id] = sia.polarity_scores(text) vader_scores = pd.DataFrame(sentiment).T.reset_index() vader = reviews.merge(vader_scores, on="index") vader.head() sns.barplot(x="rating_type", y="compound", data=vader) fig, ax = plt.subplots(1, 4, figsize=(20, 5)) sns.barplot(x="rating", y="pos", data=vader, ax=ax[0]) sns.barplot(x="rating", y="neg", data=vader, ax=ax[1]) sns.barplot(x="rating", y="neu", data=vader, ax=ax[2]) sns.barplot(x="rating", y="compound", data=vader, ax=ax[3]) ax[0].set_title("Rating vs Positive Score") ax[1].set_title("Rating vs Negative Score") ax[2].set_title("Rating vs Neutral Score") ax[3].set_title("Rating vs Compound Score") # RoBERTa (Robustly Optimized BERT approach) # RoBERTa (Robustly Optimized BERT approach) is a transformer-based language model that has been pretrained on a large corpus of unlabeled text data. It is similar to the VADER (Valence Aware Dictionary and sEntiment Reasoner) sentiment analysis model in the sense that both are powerful tools used in sentiment analysis tasks. # RoBERTa, based on the BERT (Bidirectional Encoder Representations from Transformers) architecture, excels in understanding the contextual meaning of words and sentences. 
It captures the relationships and dependencies among words, enabling it to generate more accurate sentiment predictions. # **Advantages of RoBERTa Pretrained Model:** # - Contextual Understanding: RoBERTa has been pretrained on a massive amount of text data, which helps it grasp the nuances of language and context. This contextual understanding enables more accurate sentiment analysis by considering the surrounding words and their meanings. # - Fine-tuning Capabilities: The RoBERTa model can be fine-tuned on specific sentiment analysis tasks using labeled data. This allows it to adapt and specialize its predictions for the particular sentiment classification problem at hand. # - Language-Agnostic: RoBERTa is designed to work effectively across multiple languages, making it suitable for sentiment analysis tasks in diverse linguistic contexts. # - State-of-the-Art Performance: RoBERTa has achieved state-of-the-art performance on various natural language processing (NLP) benchmarks and competitions. Its advanced architecture and training methodology contribute to its impressive accuracy and robustness. # **Disadvantages of RoBERTa Pretrained Model:** # - Computational Resources: Training and fine-tuning RoBERTa models can be computationally intensive and may require substantial computational resources, including high-performance GPUs or TPUs. # - Data Dependency: RoBERTa heavily relies on large amounts of labeled data for fine-tuning, and the quality and representativeness of the training data can significantly impact its performance. # - Interpretability: Transformer-based models like RoBERTa are known to be "black-box" models, meaning they provide accurate predictions but lack interpretability. Understanding the specific reasons behind the sentiment predictions made by RoBERTa can be challenging. # RoBERTa, with its strong contextual understanding and ability to capture intricate linguistic patterns, proves to be a valuable tool for sentiment analysis tasks. Its performance, combined with the ability to fine-tune for specific applications, makes it a popular choice in the field of NLP and sentiment analysis. 
# Tasks: # emoji, emotion, hate, irony, offensive, sentiment task = "sentiment" MODEL = f"cardiffnlp/twitter-roberta-base-{task}" tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForSequenceClassification.from_pretrained(MODEL) def roberta_scores(example): encoded_text = tokenizer(example, return_tensors="pt") output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) scores_dict = { "roberta_neg": scores[0], "roberta_neu": scores[1], "roberta_pos": scores[2], } return scores_dict sentiments_roberta = {} for ind, row in tqdm(reviews.iterrows(), total=len(reviews)): try: text = row["transformed_review"] id = row["index"] roberta_result = roberta_scores(text) sentiments_roberta[id] = roberta_result except RuntimeError: print(r"Error in row with {} index".format(id)) roberta = pd.DataFrame(sentiments_roberta).T.reset_index() roberta.head() roberta = reviews.merge(roberta, on="index") sentiment_df = roberta.merge( vader[["index", "neg", "neu", "pos", "compound"]], on="index" ) fig, ax = plt.subplots(1, 3, figsize=(20, 5)) sns.barplot(x="rating", y="roberta_pos", data=sentiment_df, ax=ax[0]) sns.barplot(x="rating", y="roberta_neg", data=sentiment_df, ax=ax[1]) sns.barplot(x="rating", y="roberta_neu", data=sentiment_df, ax=ax[2]) ax[0].set_title("Rating vs Roberta Positive Score") ax[1].set_title("Rating vs Roberta Negative Score") ax[2].set_title("Rating vs Roberta Neutral Score") # Comparing RoBERTa and VADER def get_sentiment_scores(review): # roberta encoded_text = tokenizer(review, return_tensors="pt") output = model(**encoded_text) scores = output[0][0].detach().numpy() scores = softmax(scores) scores_roberta = {"neg": scores[0], "neu": scores[1], "pos": scores[2]} scores_vader = sia.polarity_scores(review) del scores_vader["compound"] combined = {} combined["roberta"] = scores_roberta combined["vader"] = scores_vader df = pd.DataFrame(combined) # Set the width of each bar bar_width = 0.25 # Calculate the x-coordinates for each dataset x_roberta = np.arange(len(df)) x_vader = x_roberta + bar_width # Create the figure and axis fig, ax = plt.subplots() # Plot the bars for each dataset ax.bar(x_roberta, df["roberta"], width=bar_width, label="Roberta") ax.bar(x_vader, df["vader"], width=bar_width, label="Vader") # Set the x-axis tick labels ax.set_xticks(x_roberta + bar_width / 2) ax.set_xticklabels(df.index) ax.set_title(review) # Add a legend ax.legend() return fig # another sample review example = reviews["transformed_review"][np.random.randint(len(reviews))] example get_sentiment_scores(example) # Based on analysis done on tons of reviews, several differences can be observed. Here is a summary of the comparison: # **Sentiment Differentiation:** The Roberta model shows a better ability to differentiate between positive, negative, and neutral sentiments compared to the Vader model. The Roberta model assigns higher scores to the positive and negative categories, while the Vader model assigns higher scores to the neutral category. # **Valuing Neutrality:** The Vader model tends to value neutrality more for most of the reviews. This is evident from the higher neutral score assigned by the Vader model compared to the Roberta model for the given review. # **Sensitivity to Negativity:** The Roberta model appears to be more sensitive to negative sentiment compared to the Vader model. This is indicated by the higher negative score assigned by the Roberta model for the given review. 
# **Overall Sentiment Polarity:** The Roberta model assigns a higher positive score and a lower neutral score compared to the Vader model for the given review. This suggests that the Roberta model perceives the review as more positive overall, while the Vader model perceives it as more neutral. # Overall, the comparison highlights that the Roberta model tends to provide more nuanced sentiment analysis by differentiating between positive, negative, and neutral sentiments, while the Vader model leans towards valuing neutrality and may be less sensitive to negativity. # ## Discrepancies # ### Case1: User gave low rating but Model identify Positive sentiment odd_ones = sentiment_df[sentiment_df["rating"] == 1].sort_values( "roberta_pos", ascending=False ) odd_ones.head(3) index = 0 print(odd_ones["review"].values[index]) get_sentiment_scores(odd_ones["transformed_review"].values[index]) # Review seems positive. but user gave it low rating. Roberta manage to get the correct sentiment from the review. Notice that Vader say that the reviews is more of neutral sentiment which is wrong index = 1 print(odd_ones["review"].values[index]) get_sentiment_scores(odd_ones["transformed_review"].values[index]) # This review tends more on negative side. But both Roberta and Vader say that it is either neutral or positive. It could be because of the absense of an negative words and presence of words such as 'working', 'good', 'still'. index = 2 print(odd_ones["review"].values[index]) get_sentiment_scores(odd_ones["transformed_review"].values[index]) # This looks fine. The user wrote a positve review but gave 1 rating. The review conveys a strong positive sentiment which is correctly captured by Roberta. Vader again seems confused between neutral and positive sentiments # ### Case2: User gave high rating but Model identify negative sentiment odd_ones1 = sentiment_df[sentiment_df["rating"] == 5].sort_values( "roberta_neg", ascending=False ) odd_ones1.head(3) index = 0 print(odd_ones1["review"].values[index]) get_sentiment_scores(odd_ones1["transformed_review"].values[index]) # The review has strong postive sentiment. It seems that, both models fail to detect it because of the usage of word 'wrong' in positve context. This is an example of instances where Roberta model unable to capture the context with play of words. As usual, Vader is sticking with Neutral Sentiments index = 1 print(odd_ones1["review"].values[index]) get_sentiment_scores(odd_ones1["transformed_review"].values[index]) # The user wanted to say that the product is great for xbox which has very positive sentiment. However, Roberta model seems to have got infuenced by the word 'sucking' which was used as an adjective for xbox controller. index = 2 print(odd_ones1["review"].values[index]) get_sentiment_scores(odd_ones1["transformed_review"].values[index]) # In this instance, Vader model delivered correct output. The review sounds like the user do not bother much about performance. Roberta models classify it as negative sentiment. This could be because of words such as 'never' and 'don't'. # With all of these example, we can say that, while the Roberta model offers improved sentiment analysis capabilities compared to the Vader model, it is essential to acknowledge its limitations. Here are some limitations of the Roberta model: # **Language Dependency:** The Roberta model's performance can vary depending on the language it is trained on. If the model is trained on a specific language, it may not generalize well to other languages. 
Therefore, its effectiveness in sentiment analysis tasks may be limited to the languages it was trained on. # **Data Bias:** The performance of the Roberta model is influenced by the training data it receives. If the training data contains biases, such as underrepresentation or overrepresentation of certain demographics or perspectives, the model may inherit those biases. This can result in biased sentiment analysis outputs, affecting the reliability and fairness of the model. # **Contextual Understanding:** While the Roberta model has a strong understanding of contextual language, it may still struggle with certain nuances, sarcasm, or context-specific sentiments. These limitations stem from the complexities of language comprehension, and the model may not always capture the intended sentiment accurately in such cases. # **Interpretability:** Like many deep learning models, the Roberta model's internal workings and decision-making process can be challenging to interpret. It can be challenging to understand how the model arrives at its sentiment analysis outputs, making it difficult to explain its predictions or identify potential biases or errors. # While it offers advancements over traditional models like Vader, it is crucial to assess RoBERTa's performance within specific contexts, validate its outputs, and consider potential biases and limitations when interpreting the results. # Overall Sentiments Analysis from the Dataset # To calculate the overall sentiment of a dataset based on the Roberta scores, we will: # - Take the average ofthe sentiment scores for each sentiment category (negative, neutral, positive) across all the data points. # - Determine the sentiment with the highest average value as the overall sentiment of the dataset. roberta.head(2) new_rob = roberta[["roberta_neg", "roberta_neu", "roberta_pos"]] # Summing up the sentiment scores overall_sentiment = new_rob.mean(axis=0).idxmax() print("Overall Sentiment:", overall_sentiment)
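# A rough quantitative check of the comparison above (sketch only): treat rating_type as
# an approximate ground truth and see how often each model's label agrees with it.
# Collapsing the neutral class into a binary decision and the conventional 0.05 compound
# threshold for VADER are simplifying assumptions, not choices made in the original notebook.
eval_df = sentiment_df[sentiment_df["rating_type"] != "NA"].copy()
eval_df["roberta_label"] = (
    eval_df[["roberta_neg", "roberta_pos"]]
    .idxmax(axis=1)
    .map({"roberta_pos": "high", "roberta_neg": "low"})
)
eval_df["vader_label"] = np.where(eval_df["compound"] >= 0.05, "high", "low")
print("RoBERTa agreement with ratings:", (eval_df["roberta_label"] == eval_df["rating_type"]).mean())
print("VADER agreement with ratings:", (eval_df["vader_label"] == eval_df["rating_type"]).mean())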
false
0
7,985
1
7,985
7,985
129974150
<jupyter_start><jupyter_text>South Korean Lottery Numbers ### Background The South Korean lottery pays out millions of dollars to the winners. To date, there have been over 1000 draws (1 a week). The numbers are drawn by a vacuum sucking up plastic balls with the winning numbers written on them. Many South Korean citizens speculate that this system is rigged (or at least not 100% fair) because many numbers have been chosen unproportionally. Is it possible that choosing certain numbers will improve one's chances of winning? ### Data <ul> <li><strong>TIME</strong> - The nth lottery draw</li> <li><strong>NUM1</strong> - Winning number 1</li> <li><strong>NUM2</strong> - Winning number 2</li> <li><strong>NUM3</strong> - Winning number 3</li> <li><strong>NUM4</strong> - Winning number 4</li> <li><strong>NUM5</strong> - Winning number 5</li> <li><strong>NUM6</strong> - Winning number 6</li> <li><strong>BONUS</strong> - Winning bonus number</li> </ul> ### Additional Info Per draw, 6 numbers are chosen + 1 bonus number Of the 6 primary numbers, if at least 3 are correct the ticket is a winner. The bonus number will add a bonus if 5 out of 6 primary numbers are correct. The order of the numbers do not matter. They will always be from least-&gt;greatest. The following needs to be taken into consideration to calculate if your model is making money: -Each guess of lottery numbers costs about $1 (you can guess an unlimited amount of times) -If 3 numbers match you win $5 (ie. if you guess 5 times and only one ticket wins, you get your money back) -if 4 numbers match you win $100 -if 5 numbers match you win $1,000 -if 5 numbers and the bonus number match you win $10,000 -if all 6 numbers are correct you get the jackpot (usually at least $100,000-&gt; $10M) ### Source https://m.dhlottery.co.kr/ Kaggle dataset identifier: south-korean-lottery-numbers <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/south-korean-lottery-numbers/fake_lotto.csv") df import seaborn as sns countplot = sns.countplot(data=df, x="NUM1") num_1 = df[df.NUM1 >= 20] by_time = num_1.groupby("TIME", as_index=False) mean_by_time = by_time["NUM1"].mean() mean_by_time = mean_by_time.head() barplot = sns.barplot(x="TIME", y="NUM1", data=mean_by_time) # ### if I take the first 5 values where the winning number1 >20, he got the maximum value in round number 6 by_time = df.groupby("TIME", as_index=False).NUM2.mean().head() relplot = sns.relplot(x="TIME", y="NUM2", data=by_time)
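# The "is it rigged?" question above can be probed with a quick goodness-of-fit check
# (a sketch, not a conclusion): pooled over NUM1..NUM6, a fair draw should produce
# roughly equal counts for every ball. This assumes every possible number appears at
# least once in the data.
from scipy import stats

all_numbers = df[["NUM1", "NUM2", "NUM3", "NUM4", "NUM5", "NUM6"]].values.ravel()
observed = pd.Series(all_numbers).value_counts().sort_index()
chi2, p = stats.chisquare(observed)
print(f"chi-square = {chi2:.1f}, p-value = {p:.4f}")  # a small p-value would suggest non-uniform draws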
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974150.ipynb
south-korean-lottery-numbers
calebreigada
[{"Id": 129974150, "ScriptId": 38659267, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14989378, "CreationDate": "05/17/2023 20:57:40", "VersionNumber": 3.0, "Title": "Data_Visualization", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 40.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 33.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186415008, "KernelVersionId": 129974150, "SourceDatasetVersionId": 3233531}]
[{"Id": 3233531, "DatasetId": 1946000, "DatasourceVersionId": 3283627, "CreatorUserId": 5720392, "LicenseName": "CC0: Public Domain", "CreationDate": "02/27/2022 05:16:41", "VersionNumber": 2.0, "Title": "South Korean Lottery Numbers", "Slug": "south-korean-lottery-numbers", "Subtitle": "Determining the best numbers to choose in the South Korean Lottery", "Description": "### Background\nThe South Korean lottery pays out millions of dollars to the winners. To date, there have been over 1000 draws (1 a week). The numbers are drawn by a vacuum sucking up plastic balls with the winning numbers written on them. Many South Korean citizens speculate that this system is rigged (or at least not 100% fair) because many numbers have been chosen unproportionally. Is it possible that choosing certain numbers will improve one's chances of winning?\n\n### Data\n<ul>\n<li><strong>TIME</strong> - The nth lottery draw</li>\n<li><strong>NUM1</strong> - Winning number 1</li>\n<li><strong>NUM2</strong> - Winning number 2</li>\n<li><strong>NUM3</strong> - Winning number 3</li>\n<li><strong>NUM4</strong> - Winning number 4</li>\n<li><strong>NUM5</strong> - Winning number 5</li>\n<li><strong>NUM6</strong> - Winning number 6</li>\n<li><strong>BONUS</strong> - Winning bonus number</li>\n</ul>\n\n\n### Additional Info\nPer draw, 6 numbers are chosen + 1 bonus number\nOf the 6 primary numbers, if at least 3 are correct the ticket is a winner.\nThe bonus number will add a bonus if 5 out of 6 primary numbers are correct.\nThe order of the numbers do not matter. They will always be from least-&gt;greatest.\n\nThe following needs to be taken into consideration to calculate if your model is making money:\n-Each guess of lottery numbers costs about $1 (you can guess an unlimited amount of times)\n-If 3 numbers match you win $5 (ie. if you guess 5 times and only one ticket wins, you get your money back)\n-if 4 numbers match you win $100\n-if 5 numbers match you win $1,000\n-if 5 numbers and the bonus number match you win $10,000\n-if all 6 numbers are correct you get the jackpot (usually at least $100,000-&gt; $10M)\n\n\n\n### Source\nhttps://m.dhlottery.co.kr/", "VersionNotes": "Data Update 2022/02/27", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1946000, "CreatorUserId": 5720392, "OwnerUserId": 5720392.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3233531.0, "CurrentDatasourceVersionId": 3283627.0, "ForumId": 1969854, "Type": 2, "CreationDate": "02/20/2022 10:08:58", "LastActivityDate": "02/20/2022", "TotalViews": 9985, "TotalDownloads": 762, "TotalVotes": 34, "TotalKernels": 3}]
[{"Id": 5720392, "UserName": "calebreigada", "DisplayName": "Caleb Reigada", "RegisterDate": "09/04/2020", "PerformanceTier": 3}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/south-korean-lottery-numbers/fake_lotto.csv") df import seaborn as sns countplot = sns.countplot(data=df, x="NUM1") num_1 = df[df.NUM1 >= 20] by_time = num_1.groupby("TIME", as_index=False) mean_by_time = by_time["NUM1"].mean() mean_by_time = mean_by_time.head() barplot = sns.barplot(x="TIME", y="NUM1", data=mean_by_time) # ### if I take the first 5 values where the winning number1 >20, he got the maximum value in round number 6 by_time = df.groupby("TIME", as_index=False).NUM2.mean().head() relplot = sns.relplot(x="TIME", y="NUM2", data=by_time)
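# Back-of-the-envelope expected return per $1 ticket under a fair draw, using the
# payout tiers described with this dataset (a sketch; the 45-ball pool, the fixed
# $100,000 jackpot figure, and ignoring the bonus-ball tier are all assumptions).
from math import comb


def p_match(k):
    # probability that exactly k of your 6 picks are among the 6 drawn numbers
    return comb(6, k) * comb(45 - 6, 6 - k) / comb(45, 6)


payouts = {3: 5, 4: 100, 5: 1_000, 6: 100_000}
expected_return = sum(p_match(k) * prize for k, prize in payouts.items())
print(f"expected return per $1 ticket: ${expected_return:.2f}")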
false
1
413
1
993
413
129974788
# # What is HuggingFace's Diffusers ? # > Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. It is a modular toolbox that can be used to build our own inference solution, i.e. to train our own diffusion models. # > Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. # Diffusers consists of three main components # 1. State-of-the-art diffusion pipelines for inference. The DiffusionPipeline is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference. # 2. Interchangeable noise schedulers for balancing trade-offs between generation speed and quality, plus popular pretrained model architectures and modules that can be used as building blocks for creating diffusion systems. # 3. Pretrained models that can be used as building blocks and combined with schedulers for creating our own end-to-end diffusion systems. Many different schedulers - algorithms that control how noise is added for training, and how to generate denoised images during inference. # Hence we first learn how to use the DiffusionPipeline for inference and then go through how to combine a model and a scheduler to replicate what's happening inside the DiffusionPipeline. # Make sure to install all the libraries # 1. Diffusers for Pipelines # 2. Accelerate to speed up model loading for inference and training # 3. Transformers for running the most popular diffusion algorithms like Stable Diffusion # The DiffusionPipeline lets us use a pretrained # diffusion system for making inferences. It is an end-to-end system containing a model and a scheduler. It can be used out of the box for many tasks like # 1. Unconditional Image Generation : generate an image from Gaussian noise # 2. Text-Guided Image Generation : generate an image given a text prompt # 3. Text-Guided Image-to-Image Translation : adapt an image guided by a text prompt # 4. Text-Guided Image-Inpainting : fill the masked part of an image given the image, the mask and a text prompt # 5. Text-Guided Depth-to-Image Translation : adapt parts of an image guided by a text prompt while preserving structure via depth estimation # We will start with the following steps # 1. Creating an instance of a DiffusionPipeline and specifying which pipeline checkpoint we want to download. # Import the important library, DiffusionPipeline, from diffusers from diffusers import DiffusionPipeline # Load the model with the from_pretrained method pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") # Now, DiffusionPipeline downloads and caches all the modeling, tokenization and scheduling components; the Stable Diffusion pipeline is composed of the UNet2DConditionModel, the PNDMScheduler, etc. # Note : Please try to run these pipelines on a GPU because the Stable Diffusion model roughly consists of 1.4 billion parameters, therefore we just move the generator object to the GPU just like in PyTorch pipeline.to("cuda") # Now, we will pass the text prompt to our Stable Diffusion pipeline to generate an image and then access the denoised image. # Note : by default, the image output is wrapped in a `PIL.Image` object.
image = pipeline( "An Image of the cow in a green field taking bath in the river" ).images[0] image # to save images image.save("Image_of_cow_in_river.png") image = pipeline("An Image of children taking bath in canal as fun").images[0] image image = pipeline("An Image of lord ram in ancient time with his dharma patni").images[0] image image = pipeline("An Image of Lord ram in the modern avatar").images[0] image image = pipeline("An Image of british brutality on indians").images[0] image
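The notes above describe noise schedulers as interchangeable. The sketch below (not from the original notebook; the scheduler class and step count are illustrative choices) swaps the pipeline's default scheduler for another one with the documented `from_config` pattern and regenerates an image with fewer inference steps.

from diffusers import EulerDiscreteScheduler

# Reuse the pipeline created above; only the scheduler is replaced
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
# Fewer denoising steps trades some quality for speed
image = pipeline(
    "An Image of the cow in a green field taking bath in the river",
    num_inference_steps=30,
).images[0]
image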
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974788.ipynb
null
null
[{"Id": 129974788, "ScriptId": 38663988, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8324004, "CreationDate": "05/17/2023 21:07:05", "VersionNumber": 2.0, "Title": "Diffusors_DiffusionPipeline", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 65.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 65.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
# # What is HuggingFace's Diffusers ? # > Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. It is a modular toolbox that can be used to build our own inference solution, i.e. to train our own diffusion models. # > Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. # Diffusers consists of three main components # 1. State-of-the-art diffusion pipelines for inference. The DiffusionPipeline is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference. # 2. Interchangeable noise schedulers for balancing trade-offs between generation speed and quality, plus popular pretrained model architectures and modules that can be used as building blocks for creating diffusion systems. # 3. Pretrained models that can be used as building blocks and combined with schedulers for creating our own end-to-end diffusion systems. Many different schedulers - algorithms that control how noise is added for training, and how to generate denoised images during inference. # Hence we first learn how to use the DiffusionPipeline for inference and then go through how to combine a model and a scheduler to replicate what's happening inside the DiffusionPipeline. # Make sure to install all the libraries # 1. Diffusers for Pipelines # 2. Accelerate to speed up model loading for inference and training # 3. Transformers for running the most popular diffusion algorithms like Stable Diffusion # The DiffusionPipeline lets us use a pretrained # diffusion system for making inferences. It is an end-to-end system containing a model and a scheduler. It can be used out of the box for many tasks like # 1. Unconditional Image Generation : generate an image from Gaussian noise # 2. Text-Guided Image Generation : generate an image given a text prompt # 3. Text-Guided Image-to-Image Translation : adapt an image guided by a text prompt # 4. Text-Guided Image-Inpainting : fill the masked part of an image given the image, the mask and a text prompt # 5. Text-Guided Depth-to-Image Translation : adapt parts of an image guided by a text prompt while preserving structure via depth estimation # We will start with the following steps # 1. Creating an instance of a DiffusionPipeline and specifying which pipeline checkpoint we want to download. # Import the important library, DiffusionPipeline, from diffusers from diffusers import DiffusionPipeline # Load the model with the from_pretrained method pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") # Now, DiffusionPipeline downloads and caches all the modeling, tokenization and scheduling components; the Stable Diffusion pipeline is composed of the UNet2DConditionModel, the PNDMScheduler, etc. # Note : Please try to run these pipelines on a GPU because the Stable Diffusion model roughly consists of 1.4 billion parameters, therefore we just move the generator object to the GPU just like in PyTorch pipeline.to("cuda") # Now, we will pass the text prompt to our Stable Diffusion pipeline to generate an image and then access the denoised image. # Note : by default, the image output is wrapped in a `PIL.Image` object.
image = pipeline( "An Image of the cow in a green field taking bath in the river" ).images[0] image # to save images image.save("Image_of_cow_in_river.png") image = pipeline("An Image of children taking bath in canal as fun").images[0] image image = pipeline("An Image of lord ram in ancient time with his dharma patni").images[0] image image = pipeline("An Image of Lord ram in the modern avatar").images[0] image image = pipeline("An Image of british brutality on indians").images[0] image
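The introduction above promises to show how a model and a scheduler can be combined by hand to replicate what the DiffusionPipeline does internally. The following is a rough sketch of that denoising loop with an unconditional UNet; the checkpoint name and the number of steps are illustrative assumptions, not taken from this notebook.

import torch
from diffusers import DDPMScheduler, UNet2DModel

model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
scheduler = DDPMScheduler()  # standard 1000-step DDPM noise schedule
scheduler.set_timesteps(50)  # use 50 denoising steps at inference time

size = model.config.sample_size
sample = torch.randn(1, 3, size, size, device="cuda")  # start from pure Gaussian noise
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = model(sample, t).sample  # predict the noise residual
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # remove one step of noise
# `sample` now holds a generated image tensor with values roughly in [-1, 1]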
false
0
932
2
932
932
129974198
from auptitcafe.menus import Menus import pandas as pd menu_instance = Menus() menus = "menus.csv" menu_instance.to_csv(menus) df = pd.read_csv(menus) df import duckdb con = duckdb.connect(database="auptitcafe.duckdb", read_only=False) con.execute( """create or replace table menus( titre_plat varchar not null, prix integer not null, category varchar not null check (category in ('PLAT','DESSERT')), recette varchar not null, image_url varchar not null );""" ) con.execute("""insert into menus SELECT * from 'menus.csv';""") df = con.query( """select * from menus where category = 'PLAT';""" ).to_df() df
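As a small follow-up (a sketch added here, not part of the original notebook), the same connection and table can answer simple aggregate questions, for example the average price per menu category declared in the CREATE TABLE above.

# Group the menus table by category and average the price column
avg_prices = con.query(
    """select category, count(*) as nb_items, avg(prix) as prix_moyen
       from menus
       group by category
       order by prix_moyen desc;"""
).to_df()
avg_prices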
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974198.ipynb
null
null
[{"Id": 129974198, "ScriptId": 38393360, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12771330, "CreationDate": "05/17/2023 20:58:18", "VersionNumber": 6.0, "Title": "Au p'tit caf\u00e9 (pypi package)", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 31.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 15.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
from auptitcafe.menus import Menus import pandas as pd menu_instance = Menus() menus = "menus.csv" menu_instance.to_csv(menus) df = pd.read_csv(menus) df import duckdb con = duckdb.connect(database="auptitcafe.duckdb", read_only=False) con.execute( """create or replace table menus( titre_plat varchar not null, prix integer not null, category varchar not null check (category in ('PLAT','DESSERT')), recette varchar not null, image_url varchar not null );""" ) con.execute("""insert into menus SELECT * from 'menus.csv';""") df = con.query( """select * from menus where category = 'PLAT';""" ).to_df() df
false
0
201
0
201
201
129974277
# ![](https://i.kym-cdn.com/entries/icons/original/000/014/737/07_trust-fall.gif) # trust game import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns # Reset rcParams to default values mpl.rcParams.update(mpl.rcParamsDefault) # globally setting seaborn sns.set(style="ticks", palette="muted", font_scale=1.2, context="talk") mpl.rcParams["lines.linewidth"] = 2 mpl.rcParams["lines.markersize"] = 10 mpl.rcParams["font.size"] = 16 dirname = "/kaggle/input/economic-games-ba/" sl_all_avg = pd.read_csv(dirname + "sl_all_avg.csv") sl_both75_avg = pd.read_csv(dirname + "sl_both75_avg.csv") sl_both75_unfold = pd.read_csv(dirname + "sl_both75_unfold.csv") sl_dg75_avg = pd.read_csv(dirname + "sl_dg75_avg.csv") sl_dg75_unfold = pd.read_csv(dirname + "sl_dg75_unfold.csv") sl_tg75_avg = pd.read_csv(dirname + "sl_tg75_avg.csv") sl_tg75_unfold = pd.read_csv(dirname + "sl_tg75_unfold.csv") # TG by gender sl_all_avg[sl_all_avg["Gender"] == "Female"]["TG_trustor"].hist() sl_all_avg[sl_all_avg["Gender"] == "Male"]["TG_trustor"].hist() plt.legend(["F", "M"]) plt.title("Histogram of TG_trustor by Gender")
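The overlaid histograms above only eyeball the gender difference. A quick, distribution-free follow-up is a rank-based test; this is a sketch (not in the original notebook) that reuses the `Gender` and `TG_trustor` columns from the plot and drops missing values first.

from scipy.stats import mannwhitneyu

female = sl_all_avg.loc[sl_all_avg["Gender"] == "Female", "TG_trustor"].dropna()
male = sl_all_avg.loc[sl_all_avg["Gender"] == "Male", "TG_trustor"].dropna()
stat, p_value = mannwhitneyu(female, male, alternative="two-sided")
print(f"Mann-Whitney U = {stat:.1f}, p-value = {p_value:.4f}")
# A small p-value would indicate the two gender distributions of TG_trustor differ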
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974277.ipynb
null
null
[{"Id": 129974277, "ScriptId": 38662336, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15152859, "CreationDate": "05/17/2023 20:59:35", "VersionNumber": 1.0, "Title": "EDA on decisions of strategic uncertainty", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 41.0, "LinesInsertedFromPrevious": 41.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ![](https://i.kym-cdn.com/entries/icons/original/000/014/737/07_trust-fall.gif) # trust game import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns # Reset rcParams to default values mpl.rcParams.update(mpl.rcParamsDefault) # globally setting seaborn sns.set(style="ticks", palette="muted", font_scale=1.2, context="talk") mpl.rcParams["lines.linewidth"] = 2 mpl.rcParams["lines.markersize"] = 10 mpl.rcParams["font.size"] = 16 dirname = "/kaggle/input/economic-games-ba/" sl_all_avg = pd.read_csv(dirname + "sl_all_avg.csv") sl_both75_avg = pd.read_csv(dirname + "sl_both75_avg.csv") sl_both75_unfold = pd.read_csv(dirname + "sl_both75_unfold.csv") sl_dg75_avg = pd.read_csv(dirname + "sl_dg75_avg.csv") sl_dg75_unfold = pd.read_csv(dirname + "sl_dg75_unfold.csv") sl_tg75_avg = pd.read_csv(dirname + "sl_tg75_avg.csv") sl_tg75_unfold = pd.read_csv(dirname + "sl_tg75_unfold.csv") # TG by gender sl_all_avg[sl_all_avg["Gender"] == "Female"]["TG_trustor"].hist() sl_all_avg[sl_all_avg["Gender"] == "Male"]["TG_trustor"].hist() plt.legend(["F", "M"]) plt.title("Histogram of TG_trustor by Gender")
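A numeric companion to the histogram above (sketch only; it assumes the same `Gender` and `TG_trustor` columns used in the plot): per-gender summary statistics for the trustor decision.

summary = sl_all_avg.groupby("Gender")["TG_trustor"].describe()
print(summary)  # count, mean, std and quartiles for Female vs Male trustors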
false
0
454
0
454
454
129974168
<jupyter_start><jupyter_text>Intel Image Classification ### Context This is image data of Natural Scenes around the world. ### Content This Data contains around 25k images of size 150x150 distributed under 6 categories. {'buildings' -&gt; 0, 'forest' -&gt; 1, 'glacier' -&gt; 2, 'mountain' -&gt; 3, 'sea' -&gt; 4, 'street' -&gt; 5 } The Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction. This data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge. Kaggle dataset identifier: intel-image-classification <jupyter_script># # Assignment-05 Convolutional Neural Networks # ### Students: # - Sharon Sarai Maygua Mendiola # - Franklin Ruben Rosembluth Prado # Utils to run notebook on Kaggle import os import cv2 import glob import pickle import matplotlib import numpy as np import pandas as pd import imageio as im import seaborn as sns import tensorflow as tf import matplotlib.image as mpimg import matplotlib.pyplot as plt from PIL import Image from tensorflow import keras from keras import models from pickle import dump from pickle import load from tensorflow import keras from tensorflow.keras.utils import ( to_categorical, plot_model, img_to_array, load_img, array_to_img, ) from tensorflow.keras import regularizers from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import ( Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization, ) from tensorflow.keras.preprocessing import image_dataset_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras import layers from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras.optimizers import Adam, RMSprop from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.utils import shuffle from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint # from keras.preprocessing import image import keras.utils as image # load and save files with pickle def save_pickle(file, file_name): dump(file, open(file_name, "wb")) print("Saved: %s" % file_name) def load_pickle(file_name): return load(open(file_name, "rb")) # PATHS # path to the folder containing the subfolders with the training images trainpath = "/kaggle/input/intel-image-classification/seg_train/seg_train" # path to the folder containing the subfolders with the testing images testpath = "/kaggle/input/intel-image-classification/seg_test/seg_test" predpath = "/kaggle/input/intel-image-classification/seg_pred/seg_pred" # Tensorflow datasets creator from directory, making images to categorical # Not used, because we wanted to learn how to label our images by our own train_ds = image_dataset_from_directory( trainpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical" ) test_ds = image_dataset_from_directory( testpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical" ) print("Train class names:", train_ds.class_names) print("Test class names:", test_ds.class_names) plt.figure(figsize=(5, 5)) for images, labels in train_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) label = tf.argmax(labels[i]).numpy() 
plt.title(train_ds.class_names[label]) plt.axis("off") # # Labeling # - This dataset needed some pre-processing. # - The images were generally labeled, since they were in categorized folders. However, for training it is necessary that each image is associated with its label, so each of the training and test images was labeled. # - The validation images could not be processed in this way because they were not categorized. # With this objective, the *'def labeling'* function was created, which also transforms the text labels to numeric labels and converts the lists in which the images and labels had been stored, into numpy arrays of type (float32) and type (int32). # This is because working with this type of data reduces the amount of storage memory, improves model performance, and because Keras needs its input data to be of this type. # Also to reduce the amount of the images size, we resized all the images with (150x150) in labeling to normalize after concludes the labels of each image. # Create a dictionary to change text labels into int numerical labels (Ordered alphabetically) class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"] class_labels = {class_name: i for i, class_name in enumerate(class_names)} print(class_labels) # Resize of images IMAGE_SIZE = (150, 150) # def for labeling def labeling(folder_path, images, labels): # loop through all subfolders in the folder_path for label in os.listdir(folder_path): # get the path to the subfolder label_path = os.path.join(folder_path, label) # convert label text to label number label_number = class_labels[label] # loop through all images in subfolder for file_name in os.listdir(label_path): # upload image using Pillow image = Image.open(os.path.join(label_path, file_name)) # resize image to desired size image = image.resize(IMAGE_SIZE) # convert the image to a Numpy array image = np.array(image) # add image to testing_image list images.append(image) # add image label to testing_label list labels.append(label_number) # convert the images and labels list to numpy array images = np.array(images, dtype="float32") labels = np.array(labels, dtype="int32") return images, labels # # Data Visualization # In this section you can see the results of the labeling. # An image of the training set is plotted and its label is printed, both are consistent. # Training labeling # list to store the images and their labels training_images = [] training_labels = [] x_train, y_train = labeling(trainpath, training_images, training_labels) # Testing labeling # list to store the images and their labels testing_images = [] testing_labels = [] x_test, y_test = labeling(testpath, testing_images, testing_labels) plt.imshow(training_images[5]) print(f"label: {training_labels[5]}, name: {class_names[training_labels[5]]}") # # Data preparation # - This part of the code has to be mean with hot-encodes, normalization, and splits of the data. # In the first part, we find the number of unique classes in the training tag set and then converts the categorical tags into a one-hot encoding representation for the training and test tag sets. 
# Find the unique numbers from the train labels num_clases = len(np.unique(y_train)) # Change the labels from categorical to one-hot encoding y_train = to_categorical(y_train, num_clases) y_test = to_categorical(y_test, num_clases) # Visualize y_train after one hot encoding y_train[0] # ### Normalization # Second Part, the train and test images are normalized to make sure that all images have comparable pixel values and are in a manageable range. # This helps to improve the accuracy of the model and to reduce the variance of the input data. # The normalization being used here is known as **"Z-score normalization"** or **"standard normalization"**. # The mean and standard deviation of the training data are calculated and then used to scale both the training and test data, the formula used is: **(x - mean) / standard deviation** # This normalization centers the data at zero and scales the units to have a variance of one. The constant 1e-7 is added to the denominator to avoid a possible division by zero in case the standard deviation is very small. # Using Z-score normalization to converge faster and improve accuracy mean = np.mean(x_train) std = np.std(x_train) x_train = (x_train - mean) / (std + 1e-7) x_test = (x_test - mean) / (std + 1e-7) # Finally, 10 percent of the **train set** is separated for validation since the set destined for validation was not labeled. # We chose to do the validation in this way (training by sending the train data and validation) to save time, make better use of the data, detect overfitting problems early and optimize the overall performance of the model. # Split train and test x_train, x_valid, y_train, y_valid = train_test_split( x_train, y_train, test_size=0.1, random_state=13 ) print("train splitted:", x_train.shape[0]) print("val:", x_valid.shape[0]) print("test:", x_test.shape[0]) # # Building our Convnet Model # Convnet architecture: # Having the input images ready, these images go through a convolution network that extracts features (edges, textures, etc.) at first in a very superficial way and then, as it goes deeper into the network, much more complex features are extracted. # These convolution layers are linked to a maxpooling layer that reduces complexity by limiting the length and width of the images. And so layer after layer of stacked convolutions with maxpooling will give us back an image that is smaller and smaller but deeper in its meaning and information. # Next, a layer called Flatten is applied to flat the image input. # ### General parameters: # **Kernel (filters)**: # We know that images are understood as arrays of pixels. # The kernel is also a matrix (but smaller) that moves from the upper left corner to the lower right corner of the image, going step by step until it completes the entire image by doing a little mathematical operation called convolution. And in this tour, a mathematical multiplication operation is executed that obtains the data and patterns for each row and column of the image. # The result of this convolution results in a new image with certain features highlighted. Thus, the objective of the filters is to detect features of the input images. # In our model we started using 32 filters in the first layer that were later increased in the following layers. # **Padding:** # It is a margin that is added to the image so that when performing the convolution operation the resulting image does not reduce its size. 'same' is used so that it does not alter the characteristics of the original images. 
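A tiny numeric illustration of the Z-score formula described above (the array values are made up for illustration; the notebook applies the same operation to the image arrays):

import numpy as np

toy = np.array([10.0, 20.0, 30.0, 40.0])
standardized = (toy - toy.mean()) / (toy.std() + 1e-7)
print(standardized)                              # values centred on 0
print(standardized.mean(), standardized.std())   # ~0 mean, ~1 standard deviation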
# In our model we use padding = 'same'. # **Maxpooling:** # Reduces the size of the images resulting from the convolution thanks to a kernel that highlights only the most relevant features of the image. # ### Parameters of the optimized models: # **Regularizers:** # We use the L2 regularization (which controls the magnitude of the weights) # kernel_regularizer=regularizers.l2(w_regularizer) is used to apply L2 regularization to the weights of a convolutional layer on a CNN. It helps to avoid overfitting and improve the generalizability of the model by penalizing large weights. # **Batch normalization:** # It is a normalization within the hidden layers, throught the training, since the weights vary constantly, these values can be standardized within the network. This facilitates gradient descent and works in batches. The result is standardized data even within the network training. # Which brings the resulting tensor to one dimension. And having only one dimension, the classification is done by stacking dense layers as it was done in typical neural networks. # filter_batch = 32 w_regulatizer = 1e-4 model = Sequential() ## conv 1 model.add( Conv2D( filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), input_shape=x_train.shape[1:], ) ) model.add(Activation("relu")) model.add(BatchNormalization()) ## conv 2 model.add( Conv2D( filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) ## conv 3 model.add( Conv2D( 2 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Dropout(0.2)) ## conv 4 model.add( Conv2D( 2 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) ## conv 5 model.add( Conv2D( 4 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) ## conv 6 model.add( Conv2D( 4 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.4)) ## Clasificacion - Flatten model.add(Flatten()) model.add(Dense(num_clases, activation="softmax")) # summary of the created convnet # model.summary() # This is the summary of our first Convnet created plotted: plot_model_file = "first_model.png" plot_model(model, to_file=plot_model_file, show_shapes=True) # As we can observe from the summary of our model, our first Convnet utilizes images that have already been resized to 150x150 and with each convolutional layer that passes, the size of the image is reduced by half, starting from the original size of 150 pixels and reaching 18 pixels as its limit. # With that we can see the importance to understand the extent to which size can be reduced because if we exceed too many small pixels like 2 or 1 as the minimum resize, the prediction accuracy could be bad. # ### Data Augmentation # By using the ImageDataGenerator its parameters, we generate augmented images on the fly during the training. 
# These augmented images can then be used to train our saved model, enhancing and helping to generalize improve performance on unseen images. ## Data augmentation datagen = ImageDataGenerator( rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=True, ) # # Compiling our first model # Using Adam optimizer and categorical_crossentropy lost function model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"]) # **Callbacks:** early stopping and checkpoints # Early Stopping: # When a neural network has stopped optimizing the accuracy or the metric that we put in 'monitor', if this metric does not rise, this can decide to end the training when the network begins to diverge. # Checkpoint: # With this we can fully execute the training network throughout the epochs that we defined but in this file the weights of the neural network that had a better accuracy are saved. This ensures that the model is always the best. checkpoint_model_name = "my_first_model.hdf5" checkpoint = ModelCheckpoint( checkpoint_model_name, verbose=1, save_best_only=True, monitor="val_accuracy" ) BATCH_SIZE = 128 EPOCHS = 40 history_first_model = model.fit( datagen.flow(x_train, y_train, batch_size=BATCH_SIZE), callbacks=[checkpoint], steps_per_epoch=x_train.shape[0] // BATCH_SIZE, epochs=EPOCHS, verbose=1, validation_data=(x_valid, y_valid), ) # Defining accuracy and loss plot funtion def plot_loss_accuracy(hist, save_image_filename, len_epochs): epochs = len_epochs acc = hist.history["accuracy"] val_acc = hist.history["val_accuracy"] loss = hist.history["loss"] val_loss = hist.history["val_loss"] epochs_range = range(epochs) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, "r--", label="Training Accuracy") plt.plot(epochs_range, val_acc, "b-", label="Validation Accuracy") plt.legend(loc="lower right") plt.title("Training and Validation Accuracy") plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, "r--", label="Training Loss") plt.plot(epochs_range, val_loss, "b-", label="Validation Loss") plt.legend(loc="upper right") plt.title("Training and Validation Loss") plt.savefig(save_image_filename) plt.show() # call the function to plot the curves plot_loss_accuracy(history_first_model, "loss_accuracy_40.png", EPOCHS) # Saving history of our first model fit save_first_history_model_file = "history_first_model.pkl" save_pickle(history_first_model, save_first_history_model_file) # Saving our first model save_first_model_file = "first-model-40-epochs.pkl" save_pickle(model, save_first_model_file) # Loading our first model with history plot saved_history_first_model = load_pickle("/kaggle/working/history_first_model.pkl") plot_loss_accuracy(saved_history_first_model, "loss_accuracy_40_saved.png", EPOCHS) # Loading our first model with history plot saved_first_model = load_pickle("/kaggle/working/first-model-40-epochs.pkl") # # Visualizing intermediate activations Images img_path = ( "/kaggle/input/intel-image-classification/seg_test/seg_test/glacier/20253.jpg" ) img = image.load_img(img_path, target_size=IMAGE_SIZE) img_tensor = image.img_to_array(img) img_tensor = np.expand_dims(img_tensor, axis=0) img_tensor /= 255.0 plt.imshow(img_tensor[0]) plt.show() print(img_tensor.shape) # Loading our first model # predicting images x = image.img_to_array(img) x = np.expand_dims(x, axis=0) images = np.vstack([x]) # classes = my_pickle_model.predict_classes(images, batch_size=10) predict_x = saved_first_model.predict(images) classes_x = 
np.argmax(predict_x, axis=1) print("Predicted class is:", classes_x) # ### Instantiating a model from an input tensor and a list of output tensors # Extracts the outputs of the top 12 layers layer_outputs = [layer.output for layer in saved_first_model.layers[:12]] # Creates a model that will return these outputs, given the model input activation_model = Model(inputs=saved_first_model.input, outputs=layer_outputs) # Returns a list of five Numpy arrays: one array per layer activation activations = activation_model.predict(img_tensor) first_layer_activation = activations[0] print(first_layer_activation.shape) plt.matshow(first_layer_activation[0, :, :, 4], cmap="viridis") # ### Visualizing every channel in every intermediate activation layer_names = [] for layer in saved_first_model.layers[:12]: layer_names.append( layer.name ) # Names of the layers, so you can have them as part of your plot images_per_row = 16 for layer_name, layer_activation in zip( layer_names, activations ): # Displays the feature maps n_features = layer_activation.shape[-1] # Number of features in the feature map size = layer_activation.shape[ 1 ] # The feature map has shape (1, size, size, n_features). n_cols = ( n_features // images_per_row ) # Tiles the activation channels in this matrix display_grid = np.zeros((size * n_cols, images_per_row * size)) for col in range(n_cols): # Tiles each filter into a big horizontal grid for row in range(images_per_row): channel_image = layer_activation[0, :, :, col * images_per_row + row] channel_image -= ( channel_image.mean() ) # Post-processes the feature to make it visually palatable channel_image /= channel_image.std() channel_image *= 64 channel_image += 128 channel_image = np.clip(channel_image, 0, 255).astype("uint8") display_grid[ col * size : (col + 1) * size, # Displays the grid row * size : (row + 1) * size, ] = channel_image scale = 1.0 / size plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0])) plt.title(layer_name) plt.grid(False) plt.imshow(display_grid, aspect="auto", cmap="viridis")
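The notebook imports `classification_report` and `confusion_matrix` but never calls them. The sketch below is an addition (it assumes the `saved_first_model`, `x_test`, `y_test` and `class_names` defined earlier) showing how the held-out test set could be scored per class.

y_pred = np.argmax(saved_first_model.predict(x_test), axis=1)
y_true = np.argmax(y_test, axis=1)  # undo the one-hot encoding
print(classification_report(y_true, y_pred, target_names=class_names))
cm = confusion_matrix(y_true, y_pred)
sns.heatmap(cm, annot=True, fmt="d", xticklabels=class_names, yticklabels=class_names)
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()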
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/974/129974168.ipynb
intel-image-classification
puneet6060
[{"Id": 129974168, "ScriptId": 38658728, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10470120, "CreationDate": "05/17/2023 20:57:56", "VersionNumber": 2.0, "Title": "Final-Assignment-05", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 476.0, "LinesInsertedFromPrevious": 324.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186415064, "KernelVersionId": 129974168, "SourceDatasetVersionId": 269359}]
[{"Id": 269359, "DatasetId": 111880, "DatasourceVersionId": 281586, "CreatorUserId": 2307235, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/30/2019 09:22:58", "VersionNumber": 2.0, "Title": "Intel Image Classification", "Slug": "intel-image-classification", "Subtitle": "Image Scene Classification of Multiclass", "Description": "### Context\n\nThis is image data of Natural Scenes around the world. \n\n### Content\n\nThis Data contains around 25k images of size 150x150 distributed under 6 categories.\n{'buildings' -&gt; 0, \n'forest' -&gt; 1,\n'glacier' -&gt; 2,\n'mountain' -&gt; 3,\n'sea' -&gt; 4,\n'street' -&gt; 5 }\n\nThe Train, Test and Prediction data is separated in each zip files. There are around 14k images in Train, 3k in Test and 7k in Prediction.\nThis data was initially published on https://datahack.analyticsvidhya.com by Intel to host a Image classification Challenge.\n\n\n### Acknowledgements\n\nThanks to https://datahack.analyticsvidhya.com for the challenge and Intel for the Data\n\nPhoto by [Jan B\u00f6ttinger on Unsplash][1]\n\n### Inspiration\n\nWant to build powerful Neural network that can classify these images with more accuracy.\n\n\n [1]: https://unsplash.com/photos/27xFENkt-lc", "VersionNotes": "Added Prediction Images", "TotalCompressedBytes": 108365415.0, "TotalUncompressedBytes": 361713334.0}]
[{"Id": 111880, "CreatorUserId": 2307235, "OwnerUserId": 2307235.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 269359.0, "CurrentDatasourceVersionId": 281586.0, "ForumId": 121691, "Type": 2, "CreationDate": "01/29/2019 10:37:42", "LastActivityDate": "01/29/2019", "TotalViews": 441103, "TotalDownloads": 83887, "TotalVotes": 1345, "TotalKernels": 815}]
[{"Id": 2307235, "UserName": "puneet6060", "DisplayName": "Puneet Bansal", "RegisterDate": "10/01/2018", "PerformanceTier": 0}]
# # Assignment-05 Convolutional Neural Networks # ### Students: # - Sharon Sarai Maygua Mendiola # - Franklin Ruben Rosembluth Prado # Utils to run notebook on Kaggle import os import cv2 import glob import pickle import matplotlib import numpy as np import pandas as pd import imageio as im import seaborn as sns import tensorflow as tf import matplotlib.image as mpimg import matplotlib.pyplot as plt from PIL import Image from tensorflow import keras from keras import models from pickle import dump from pickle import load from tensorflow import keras from tensorflow.keras.utils import ( to_categorical, plot_model, img_to_array, load_img, array_to_img, ) from tensorflow.keras import regularizers from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import ( Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization, ) from tensorflow.keras.preprocessing import image_dataset_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras import layers from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras.optimizers import Adam, RMSprop from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.utils import shuffle from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint # from keras.preprocessing import image import keras.utils as image # load and save files with pickle def save_pickle(file, file_name): dump(file, open(file_name, "wb")) print("Saved: %s" % file_name) def load_pickle(file_name): return load(open(file_name, "rb")) # PATHS # path to the folder containing the subfolders with the training images trainpath = "/kaggle/input/intel-image-classification/seg_train/seg_train" # path to the folder containing the subfolders with the testing images testpath = "/kaggle/input/intel-image-classification/seg_test/seg_test" predpath = "/kaggle/input/intel-image-classification/seg_pred/seg_pred" # Tensorflow datasets creator from directory, making images to categorical # Not used, because we wanted to learn how to label our images by our own train_ds = image_dataset_from_directory( trainpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical" ) test_ds = image_dataset_from_directory( testpath, seed=123, image_size=(150, 150), batch_size=64, label_mode="categorical" ) print("Train class names:", train_ds.class_names) print("Test class names:", test_ds.class_names) plt.figure(figsize=(5, 5)) for images, labels in train_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) label = tf.argmax(labels[i]).numpy() plt.title(train_ds.class_names[label]) plt.axis("off") # # Labeling # - This dataset needed some pre-processing. # - The images were generally labeled, since they were in categorized folders. However, for training it is necessary that each image is associated with its label, so each of the training and test images was labeled. # - The validation images could not be processed in this way because they were not categorized. # With this objective, the *'def labeling'* function was created, which also transforms the text labels to numeric labels and converts the lists in which the images and labels had been stored, into numpy arrays of type (float32) and type (int32). 
# This is because working with this type of data reduces the amount of storage memory, improves model performance, and because Keras needs its input data to be of this type. # Also to reduce the amount of the images size, we resized all the images with (150x150) in labeling to normalize after concludes the labels of each image. # Create a dictionary to change text labels into int numerical labels (Ordered alphabetically) class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"] class_labels = {class_name: i for i, class_name in enumerate(class_names)} print(class_labels) # Resize of images IMAGE_SIZE = (150, 150) # def for labeling def labeling(folder_path, images, labels): # loop through all subfolders in the folder_path for label in os.listdir(folder_path): # get the path to the subfolder label_path = os.path.join(folder_path, label) # convert label text to label number label_number = class_labels[label] # loop through all images in subfolder for file_name in os.listdir(label_path): # upload image using Pillow image = Image.open(os.path.join(label_path, file_name)) # resize image to desired size image = image.resize(IMAGE_SIZE) # convert the image to a Numpy array image = np.array(image) # add image to testing_image list images.append(image) # add image label to testing_label list labels.append(label_number) # convert the images and labels list to numpy array images = np.array(images, dtype="float32") labels = np.array(labels, dtype="int32") return images, labels # # Data Visualization # In this section you can see the results of the labeling. # An image of the training set is plotted and its label is printed, both are consistent. # Training labeling # list to store the images and their labels training_images = [] training_labels = [] x_train, y_train = labeling(trainpath, training_images, training_labels) # Testing labeling # list to store the images and their labels testing_images = [] testing_labels = [] x_test, y_test = labeling(testpath, testing_images, testing_labels) plt.imshow(training_images[5]) print(f"label: {training_labels[5]}, name: {class_names[training_labels[5]]}") # # Data preparation # - This part of the code has to be mean with hot-encodes, normalization, and splits of the data. # In the first part, we find the number of unique classes in the training tag set and then converts the categorical tags into a one-hot encoding representation for the training and test tag sets. # Find the unique numbers from the train labels num_clases = len(np.unique(y_train)) # Change the labels from categorical to one-hot encoding y_train = to_categorical(y_train, num_clases) y_test = to_categorical(y_test, num_clases) # Visualize y_train after one hot encoding y_train[0] # ### Normalization # Second Part, the train and test images are normalized to make sure that all images have comparable pixel values and are in a manageable range. # This helps to improve the accuracy of the model and to reduce the variance of the input data. # The normalization being used here is known as **"Z-score normalization"** or **"standard normalization"**. # The mean and standard deviation of the training data are calculated and then used to scale both the training and test data, the formula used is: **(x - mean) / standard deviation** # This normalization centers the data at zero and scales the units to have a variance of one. The constant 1e-7 is added to the denominator to avoid a possible division by zero in case the standard deviation is very small. 
# Using Z-score normalization to converge faster and improve accuracy mean = np.mean(x_train) std = np.std(x_train) x_train = (x_train - mean) / (std + 1e-7) x_test = (x_test - mean) / (std + 1e-7) # Finally, 10 percent of the **train set** is separated for validation since the set destined for validation was not labeled. # We chose to do the validation in this way (training by sending the train data and validation) to save time, make better use of the data, detect overfitting problems early and optimize the overall performance of the model. # Split train and test x_train, x_valid, y_train, y_valid = train_test_split( x_train, y_train, test_size=0.1, random_state=13 ) print("train splitted:", x_train.shape[0]) print("val:", x_valid.shape[0]) print("test:", x_test.shape[0]) # # Building our Convnet Model # Convnet architecture: # Having the input images ready, these images go through a convolution network that extracts features (edges, textures, etc.) at first in a very superficial way and then, as it goes deeper into the network, much more complex features are extracted. # These convolution layers are linked to a maxpooling layer that reduces complexity by limiting the length and width of the images. And so layer after layer of stacked convolutions with maxpooling will give us back an image that is smaller and smaller but deeper in its meaning and information. # Next, a layer called Flatten is applied to flat the image input. # ### General parameters: # **Kernel (filters)**: # We know that images are understood as arrays of pixels. # The kernel is also a matrix (but smaller) that moves from the upper left corner to the lower right corner of the image, going step by step until it completes the entire image by doing a little mathematical operation called convolution. And in this tour, a mathematical multiplication operation is executed that obtains the data and patterns for each row and column of the image. # The result of this convolution results in a new image with certain features highlighted. Thus, the objective of the filters is to detect features of the input images. # In our model we started using 32 filters in the first layer that were later increased in the following layers. # **Padding:** # It is a margin that is added to the image so that when performing the convolution operation the resulting image does not reduce its size. 'same' is used so that it does not alter the characteristics of the original images. # In our model we use padding = 'same'. # **Maxpooling:** # Reduces the size of the images resulting from the convolution thanks to a kernel that highlights only the most relevant features of the image. # ### Parameters of the optimized models: # **Regularizers:** # We use the L2 regularization (which controls the magnitude of the weights) # kernel_regularizer=regularizers.l2(w_regularizer) is used to apply L2 regularization to the weights of a convolutional layer on a CNN. It helps to avoid overfitting and improve the generalizability of the model by penalizing large weights. # **Batch normalization:** # It is a normalization within the hidden layers, throught the training, since the weights vary constantly, these values can be standardized within the network. This facilitates gradient descent and works in batches. The result is standardized data even within the network training. # Which brings the resulting tensor to one dimension. And having only one dimension, the classification is done by stacking dense layers as it was done in typical neural networks. 
# filter_batch = 32 w_regulatizer = 1e-4 model = Sequential() ## conv 1 model.add( Conv2D( filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), input_shape=x_train.shape[1:], ) ) model.add(Activation("relu")) model.add(BatchNormalization()) ## conv 2 model.add( Conv2D( filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) ## conv 3 model.add( Conv2D( 2 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Dropout(0.2)) ## conv 4 model.add( Conv2D( 2 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) ## conv 5 model.add( Conv2D( 4 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) ## conv 6 model.add( Conv2D( 4 * filter_batch, (3, 3), padding="same", kernel_regularizer=regularizers.l2(w_regulatizer), ) ) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.4)) ## Clasificacion - Flatten model.add(Flatten()) model.add(Dense(num_clases, activation="softmax")) # summary of the created convnet # model.summary() # This is the summary of our first Convnet created plotted: plot_model_file = "first_model.png" plot_model(model, to_file=plot_model_file, show_shapes=True) # As we can observe from the summary of our model, our first Convnet utilizes images that have already been resized to 150x150 and with each convolutional layer that passes, the size of the image is reduced by half, starting from the original size of 150 pixels and reaching 18 pixels as its limit. # With that we can see the importance to understand the extent to which size can be reduced because if we exceed too many small pixels like 2 or 1 as the minimum resize, the prediction accuracy could be bad. # ### Data Augmentation # By using the ImageDataGenerator its parameters, we generate augmented images on the fly during the training. # These augmented images can then be used to train our saved model, enhancing and helping to generalize improve performance on unseen images. ## Data augmentation datagen = ImageDataGenerator( rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=True, ) # # Compiling our first model # Using Adam optimizer and categorical_crossentropy lost function model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"]) # **Callbacks:** early stopping and checkpoints # Early Stopping: # When a neural network has stopped optimizing the accuracy or the metric that we put in 'monitor', if this metric does not rise, this can decide to end the training when the network begins to diverge. # Checkpoint: # With this we can fully execute the training network throughout the epochs that we defined but in this file the weights of the neural network that had a better accuracy are saved. This ensures that the model is always the best. 
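Early stopping is described above but is not wired into the training call that follows, which only passes the checkpoint callback. A minimal sketch of how it could be added (the monitored metric and patience are illustrative values):

from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(
    monitor="val_accuracy",     # same metric the checkpoint watches
    patience=5,                 # stop after 5 epochs without improvement
    restore_best_weights=True,  # roll back to the best epoch seen
)
# callbacks=[checkpoint, early_stop] would then be passed to model.fit(...)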
checkpoint_model_name = "my_first_model.hdf5" checkpoint = ModelCheckpoint( checkpoint_model_name, verbose=1, save_best_only=True, monitor="val_accuracy" ) BATCH_SIZE = 128 EPOCHS = 40 history_first_model = model.fit( datagen.flow(x_train, y_train, batch_size=BATCH_SIZE), callbacks=[checkpoint], steps_per_epoch=x_train.shape[0] // BATCH_SIZE, epochs=EPOCHS, verbose=1, validation_data=(x_valid, y_valid), ) # Defining accuracy and loss plot funtion def plot_loss_accuracy(hist, save_image_filename, len_epochs): epochs = len_epochs acc = hist.history["accuracy"] val_acc = hist.history["val_accuracy"] loss = hist.history["loss"] val_loss = hist.history["val_loss"] epochs_range = range(epochs) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, "r--", label="Training Accuracy") plt.plot(epochs_range, val_acc, "b-", label="Validation Accuracy") plt.legend(loc="lower right") plt.title("Training and Validation Accuracy") plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, "r--", label="Training Loss") plt.plot(epochs_range, val_loss, "b-", label="Validation Loss") plt.legend(loc="upper right") plt.title("Training and Validation Loss") plt.savefig(save_image_filename) plt.show() # call the function to plot the curves plot_loss_accuracy(history_first_model, "loss_accuracy_40.png", EPOCHS) # Saving history of our first model fit save_first_history_model_file = "history_first_model.pkl" save_pickle(history_first_model, save_first_history_model_file) # Saving our first model save_first_model_file = "first-model-40-epochs.pkl" save_pickle(model, save_first_model_file) # Loading our first model with history plot saved_history_first_model = load_pickle("/kaggle/working/history_first_model.pkl") plot_loss_accuracy(saved_history_first_model, "loss_accuracy_40_saved.png", EPOCHS) # Loading our first model with history plot saved_first_model = load_pickle("/kaggle/working/first-model-40-epochs.pkl") # # Visualizing intermediate activations Images img_path = ( "/kaggle/input/intel-image-classification/seg_test/seg_test/glacier/20253.jpg" ) img = image.load_img(img_path, target_size=IMAGE_SIZE) img_tensor = image.img_to_array(img) img_tensor = np.expand_dims(img_tensor, axis=0) img_tensor /= 255.0 plt.imshow(img_tensor[0]) plt.show() print(img_tensor.shape) # Loading our first model # predicting images x = image.img_to_array(img) x = np.expand_dims(x, axis=0) images = np.vstack([x]) # classes = my_pickle_model.predict_classes(images, batch_size=10) predict_x = saved_first_model.predict(images) classes_x = np.argmax(predict_x, axis=1) print("Predicted class is:", classes_x) # ### Instantiating a model from an input tensor and a list of output tensors # Extracts the outputs of the top 12 layers layer_outputs = [layer.output for layer in saved_first_model.layers[:12]] # Creates a model that will return these outputs, given the model input activation_model = Model(inputs=saved_first_model.input, outputs=layer_outputs) # Returns a list of five Numpy arrays: one array per layer activation activations = activation_model.predict(img_tensor) first_layer_activation = activations[0] print(first_layer_activation.shape) plt.matshow(first_layer_activation[0, :, :, 4], cmap="viridis") # ### Visualizing every channel in every intermediate activation layer_names = [] for layer in saved_first_model.layers[:12]: layer_names.append( layer.name ) # Names of the layers, so you can have them as part of your plot images_per_row = 16 for layer_name, layer_activation in zip( layer_names, activations ): # Displays 
the feature maps n_features = layer_activation.shape[-1] # Number of features in the feature map size = layer_activation.shape[ 1 ] # The feature map has shape (1, size, size, n_features). n_cols = ( n_features // images_per_row ) # Tiles the activation channels in this matrix display_grid = np.zeros((size * n_cols, images_per_row * size)) for col in range(n_cols): # Tiles each filter into a big horizontal grid for row in range(images_per_row): channel_image = layer_activation[0, :, :, col * images_per_row + row] channel_image -= ( channel_image.mean() ) # Post-processes the feature to make it visually palatable channel_image /= channel_image.std() channel_image *= 64 channel_image += 128 channel_image = np.clip(channel_image, 0, 255).astype("uint8") display_grid[ col * size : (col + 1) * size, # Displays the grid row * size : (row + 1) * size, ] = channel_image scale = 1.0 / size plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0])) plt.title(layer_name) plt.grid(False) plt.imshow(display_grid, aspect="auto", cmap="viridis")
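As a closing step not run in the original notebook, the best weights written by the ModelCheckpoint callback could be reloaded and scored on the test set (the `.hdf5` name matches the checkpoint defined earlier; `x_test` and `y_test` are the arrays prepared above):

best_model = keras.models.load_model("my_first_model.hdf5")
test_loss, test_acc = best_model.evaluate(x_test, y_test, verbose=0)
print(f"test accuracy: {test_acc:.3f}, test loss: {test_loss:.3f}")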
false
0
5,375
0
5,576
5,375
129454168
# # **Exploratory Data Analysis of Logistics Data | Python** # [figure: mapadf_entregas-loggi2.jpg - delivery map]
m5x9N2+uUZpZapiTIjS1WaOTsNrMWCbu3Ee2EiRFtwUMbbYdKVopxxy55F4I5bYZtnAWM4zPsi8YcyVCgWY+e8LXpvbb/AG2oStZrtPkG9EKk7hW7taXFc8g6HwiIJCBu0waUpJWbh3nWRxB5M1PYVyDGa4Z5S4+2fPLYbLEot63CY+423DG1mVgnAnKjZ1GlqmEEEk2fUZZIwql9Qo6Upm8e8XeZmNcY+ZfxEHA9csmOuTlx5q5gw5yEjcw09mxfy/IuvOHdXx5aMbSSLa1wUk0sDw7JaQAyrAqZQWEwF30pSt5V+W/kXkH5eHB7DRK9TJnMPDlzxwyQpiS1XKwVbH+SbDibGqlFuOLpLIdBXQnK22l2Ew9Tj5diYU0HiSChgFET6Upgq55ZEZkzCfM6m2DgtSvL7mMy8Urhg9nmeT5aT3JSzKMJiSY2txGSKLuSmIesY4jZyvNX7tYHBXavdD9hKHUAqUyBeRHInklzJ8nvEWQahxwhpvEOTbreLWTBXIqo8hX12iaTg6crsjliMZ0pkHxYYq752n3BZtcr927fptiJiKBjmUprOQHlK8ysk0zPNCc8RsKZaz1aeRDzKcPz/wAi8j3MraLZjg2YYq8wVYqeMphEXmO7TBUtAkGm2Oo2g2iDQyqIqqKEKKlTv8xHh1yQzpka8yOAuF8HXM4PadVqvh7zCMf8x3eDLlTX8fGMRZy2V6DXmLWdvkVRpk64JxqqU0i/ZkBIvdFOYulKunsFMvMnxxm8eTEs1t2SpHCcnTJScACRjKzXh5RV4R5K7KgRKPazM8oZX1gKVIqnaAAG2lKqerXBvkXE+Xj5SHHtar18mUuJGY+DtzzZEFtkL7NgYXB6RwyK6h5pNUWNicRyhvsCbUxjvNx7vfSlNnZOKvmF4Pgef/F7j3hvEmW8Rc58zZjy7RuQNsy3GU1XA63JKIjIjJkTk3Gj6EfWHIBKa6bOXcOeHWOZ6kokisKQlESqVdnx4w7H8fcA4bwTEyTmZj8Q4upONmku838TKJ0+uR8D7RWKIiJDPTMhU6dx6ANtuO2+lKo/4jVTzZeNnEFjw9p3CTGERb4ubzYwguQV+5SUR1jeIZZLyre7jC3R3QKXEz1/kzQ0Va01DRpARUWXS6BUIG+lKejD3lrZC4zWPynKhj58xyFSuGVV5OI5nyHLSDKvP569ZmpKah7GzrhlFnS7W05CePDnRbAfwLYxRNvt2qUyeJuFvKyN5mYWyxififA+XxUK5luz3Xk/MY/5dOsjYh5GUaWipxB7UofjjDMW9cirFaLC9bSASriPiFY0yZzAZVQRA6lYsXw55u1/gd5gvl1fEFWZWGybGcz57BOf2OZ6WnA5DkeQd9tl7qNQnKFIps7RSpJqa5KNHTt2Y7Evherr2OGlKm5zA4qZmy7x34DY6oMDDPbNgPlXwmynkhi7sMZFNImk4ScJHyA8jHjkxW8u6jEyf6O2Q3Udf/DDSlIWncceSNK5Nc9MbyuGmNt4u89rQ7ty/ISuZWq0NPYnSnMFMcaT0BOYtmEm9mnHyUtDpmZuY1VRIU3HUp09AhpSon17hj5heSMKcMfL+y/iXEVDwhxLzHhO53jlbX8rxthUzDQ+NdgJYcfxWO8OtYNraaTbbutGsEpRSTXI3jylcCiqqJibqVa35meDMi8mOA/KjAmJIxhM5JypiiZqdOi5SWZQUe+mHrlkdFB3LyJ02LBEyaJt1FTAQNthHt0pSQ5a8G2vKXy88mcQm4VrHttyThWp0s8+hGIhHM7RU1axYo9tLuIIjV86r7iyVhJF2ZufvPDHOdL1wLpSoC8QuBD6AzbjOx5H8rXHWBZnHMdZTKcgmXOK9ZtRibLIVR/VV5jEuOJqSlX7YtkbSK6fXLEZrsmawkMKhw3FSo82jhH5nTTgjj3yzKvgjDctj7AeXcUOYLkq4zXBN/jYwnjjOEPkKNbR2L3UK3mafkkYFLu36j10VkCjQ5mxlTrELpSnW5J+WRbo3mVyK5CxXC+sc5KPyec0a3eEPyyuXGm+4cvNUp7GlzUQ8btpuHqN0odjaxjd21WSH2mwWBZMxFUzEEFKlXh7hDd8b8hvLbudMwTj/BuHONPG/lBQLljenZDVucViu0Zgk6TL1+s1iXsCbeyXRo7dxz87qQBMpSKiO+xDE0pUU+bfl68jMx8neVFve8U8S8x65nLHlUrvHDJuY89PqhW+IziMo76uWKDc4aURcFnmr20Le203sQQXT9Y4JOlEil6gUp7uLPBjkfiyc8mR3dK5XW6HDLi7yIxbn1djbIh4WAuGQaLjuBqrWAboKnUsjB7I1x0UyzUTJtiEKJx2MGlKWnneEvR8F8UgxgWtK5FL5hfExelNbiqu3q7+wtLXKO2MZNumrd26ZMJMyAtzOE0VTtxUBUCGEnSKlN1Z+FnKjnlmLkBlnlRjepcUoOZ4MZP4TYlpVbyhF5hssrI5dnWNptOWrTYa1FRUVH16JkINk2jooOp2siKyiwJHEoaUrBqXHfzDOQE95fOKOSmEsSYQxhwMyfj/ADDcswVDL8bkJbPtwwnQ5ugY+YY1oTCDj5igV2zKTZ5OU9tKEXaplBsiCmwiZSp1eXNx7yZx0oPI6BylDRcLKZI5vcrM4VhKKmGE0m/oGVskOrHTJV4vHmMRnIv4hQplmqn2dub1D7CGlKRPP3h5O8rc5+XvLO8b0TJ+HsHZ7u97zdX8gp1yVhC1eVxXPVuEce6VlbvGlrWCyPEPsBUVDJfvghsXfSlQgzJ5SV7X/wDzd4k4CjarQeCXM/E8ZkqswbSRbRFd46cw6XZIh85YQ9GjHDKYPiXNzOGZOZVKJ7ssc7bLmSKUyxNlK54leX06icx44l8m+VxjzA0pRK7cI59yIjucV3zSpEzc7R5OmvpbEOOpl/LOmoWlGRVT7yXSZrMmaogIKKFDSlVhckbByFxZwSw75XPheMt1YUzl7x2xRjvKOO+QlQvOQc1Ven8qandIOtQXHyDaOL5T8jVyMjeu0OJVRNgxQjXShVDKKkKClXMci+EHK/Is/wCbdI4lmo3Hsrywx1xjg8EW5G7KwTyecYsqTuOyLVZaTgO8smPm9oQMpDFk0y98kk9Mul2EEdKVFvjN5aWX4vm7hzOBODWG+EGDoXjPyFwpkSDo2cmeYMnzNpyXD12PjLJPTyaZEJSKP4RcjEE1XTwhgOs8OQxyE0pWXDcGfMGsfE7BflUXrE2JoPjthrI+LGk9zIh8tx8g7uuA8G5KjMh0uOrmFwhUrjCZasTavR0e+cO3BY1FVNwsChwVKAKV6W9KUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUqqbyO/1TvCD8UR/wCl1n0pVrOlKNKUaUo0pRpSoQ5a5B8gY3kWPH3AuGMY5AeRmFoTMVisWSsxT2NUGzew3myUqPg4hhA4ryIrIrlUrSzhVZU7YhCmKUAMI6UrG+MDzFfky8WPnWZC/Nk0pR7/APmK/Jl4sfOsyF+bJpSj3/8AMV+TLxY+dZkL82TSlHv/AOYr8m
Xix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSlHv/5ivyZeLHzrMhfmyaUo9/8AzFfky8WPnWZC/Nk0pR7/APmK/Jl4sfOsyF+bJpSj3/8AMV+TLxY+dZkL82TSlHv/AOYr8mXix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSlHv/5ivyZeLHzrMhfmyaUo9/8AzFfky8WPnWZC/Nk0pR7/APmK/Jl4sfOsyF+bJpSj3/8AMV+TLxY+dZkL82TSlHv/AOYr8mXix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSldDq6+YY+auWT3i7xReM3iCzV20dcp78u2dNnCZkV27hBXjEZJZBdI4lOQwCUxREBDbSlMnivEPIXBkvNz+FvLp8vTEs7ZEwRsEzjfMUlS5WbQKsZwCEpIV3iZHu3yAODCp0KHMXrHq237dKU+fv8A+Yr8mXix86zIX5smlKPf/wAxX5MvFj51mQvzZNKUe/8A5ivyZeLHzrMhfmyaUo9//MV+TLxY+dZkL82TSlHv/wCYr8mXix86zIX5smlKPf8A8xX5MvFj51mQvzZNKUe//mK/Jl4sdn/+1uQvzZNKUe//AJivyZeLHzrMhfmyaUo9/wDzFfky8WPnWZC/Nk0pR7/+Yr8mXix86zIX5smlK49//MV+TLxY7fT/AP5WZC/Nk0pXPv8A+Yr8mXix86zIX5smlKPf/wAxX5MvFj51uQvzZNKUe/8A5ivyZeLHzrMhfmyaUo9//MV+TLxY+dZkL82TSlHv/wCYr8mXix86zIX5smlKPf8A8xX5MvFj51mQvzZNKUe//mK/Jl4sfOsyF+bJpSj3/wDMV+TLxY+dZkL82TSlHv8A+Yr8mXix86zIX5smlKPf/wAxX5MvFj51mQvzZNKUmLQ55yXdGHb3Ph3w2tiFesEVbIFGx8k7jNJQtog1DrQtjiiSPFxwWPnIlZQxmzpICLoGERIYBHSlKf3/APMV+TLxY+dZkL82TSlHv/5ivyZeLHzrMhfmyaUo9/8AzFfky8WPnW5C/Nk0pR7/APmK/Jl4sfOsyF+bJpSuPf8A8xUf/wC2Xix86zIX5smlK59//MV+TLxY+dZkL82TSlMNAYZz9Vclvsz1jy4/Lur2XpNw+dyGUIXLz+Mv711KAYJJy5tzPiWjPKuJADj35xXEyu49QjuOlKfn3/8AMV+TLxY+dZkL82TSlHv/AOYr8mXix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSlHv/5ivyZeLHzrMhfmyaUo9/8AzFfky8WPnWZC/Nk0pR7/APmK/Jl4sfOsyF+bJpSj3/8AMV+TLxY+dZkL82TSlHv/AOYr8mXix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSlHv/5ivyZeLHzrMhfmyaUo9/8AzFfky8WPnWZC/Nk0pR7/APmK/Jl4sfOsyF+bJpSj3/8AMV+TLxY+dZkL82TSlHv/AOYr8mXix86zIX5smlKPf/zFfky8WPnWZC/Nk0pR7/8AmK/Jl4sfOsyF+bJpSj3/APMV+TLxY+dZkL82TSlda2RPMURRWWHjLxYEEklFRD/8rcghuCZDH6QEeMmwCbp23Hs+jpSn14xZkc8hePmH83va2lT3uUKJBXB5VkJYZ5CBdSzUFnEWhNCwixlEWi3UUi4tkBUKAG6C77ApUHPI7/VO8IPxRH/pdZ9KVazpSvg6qafT3ihCdZgKXrMBRMYfQUoGEOow/AAenSldazpq3MQq7lBAyg7JlWWTSMcRHYAIBzFEwiI/BpSuxRVNIgqKqESTKG5lFDlIQofRExhAoBpSvlJZJcgKoKprJG+pUSUKoQ2w7DscgmKO2lKg9FfrKrz/AHH8X/175Z0pU5tKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSkk7nJlWVexkFEs3oRhG3jnT+RMySKu7S79JugRJq6OqYqAgY4j0gHUAaUr58bd/5igP8Av139qdKUeNu/8xQH/frv7U6Uo8bd/wCYoD/v139qdKUeNu/8xQH/AH67+1OlKPG3f+YoD/v139qdKUeNu/8AMUB/367+1OlKPG3f+YoD/v139qdKUeNu/wDMUB/367+1OlKPG3f+YoD/AL9d/anSldasndG6Si6ldh100SGUOk1nVhcHIQBMYqILRiSRlRAOwDGABH4Q0pW5LIIStfNJNerw76LUcpAcOk4EVbGMBTl3HY5d9h+mGlKh75bX9g3il+JmpfyU2lKYLyO/1TvCD8UR/wCl1n0pU180XDklWX8IlgrCWOsrxzpo5Unnl3zY+xQ4iHhFilatmLNpi3IRZdFdERMZQyrYUxDp6Tb76Uqq/wAz+x1tbifjbI3MGtYgwfmen8kMLvsVx6OalLW3jW7bNuNVbBPV2fkILHAyL73WQWVkE/ZqoMWhTGMp0GMbSlN/5msLgRzlKD5pwubuO+QMnY9491JhijjNkei1jMLPOLYMnSV1rr3FajWYC1MLNdHkoEcwla8g7FNRJFRURRIOylTe5zRuCOUHHOzYaybnikYFPV7xgScyQ7uDavzcPR7Co+hsk1ekXmGtbqMrT1lambDuTN3qxUFkREDf+rpStn5ZtqWsnGa0w1agcWxUHjrMOWcbY4ueLaEtj7FGWq7W37c0Lliu0pq57hjEWB+/VRc+z3B2TlwyWUaqikcggpSYwKz5AM/MUyqXkJYMO2CdU4bYsNWVsN1K61KKawIZuyyCiE82utyubt7LmebmBVusgiCfZ3e/bpSrSNKUaUo0pRpSjSlGlKNKUaUoEQD09mlKbQ99WKooQjJsoUiiiYHI4E5Td2cxNwEgGL/k/R7NLiuSCPXwfz8V8e/7j+bkf4Y/7nSuKZ+v5Yzo4zVkCDseOqXHYJjqrWHeN70ws6zu5WO3OhJ71xE9WxESRMZFAJvDLgUBW2DtHfswIpNgdhLHLGg1oRTG4b7mY/qBX6AfQ1a87D6fH1DBzdfmZUndJMmZcrGaLjBFAv8AsvHN/Oz/AMy/y/8Ad4Pf9x/NyP8ADH/c6z6qlcBkBYdwBg3ESjsYCuDCJR+gYAARKP7e2lc2PqQbVz7/ALj+bkf4Y/7nSuKPf9x/NyP8Mf8Ac6Uo9/3H83I/wx/3OlKPf9x/NyP8Mf8Ac6Uo9/3H83I/wx/3OlKPf9x/NyP8Mf8Ac6Uo9/3H83I/wx/3OlKPf9x/NyP8M
f8Ac6UrTyGX4qKayjyRXiGjaDRBzMqKSBRGLQMHqqv00+tVqQ2/YJyhv8GovN3en12PkZWdlQR4+InKcl1Psr9DIASyA/TkBf6VJ4el2+wyMfFwsaeSfLfjAAjf1W/CMkBWI+tibfWs5lkssizayDFs1dMnzdJ2zdIrnMk4bLkBRFZI3SHURRMwCA/Q1mYmVjZ+LHm4TrJiTIHR1N1ZWF1YH8CPI/KsPKxcjByZMPMRo8uJyjo3gqymzKR+IPg1le/7j+bkf4Y/7nWRXRR7/uP5uR/hj/udKUe/7j+bkf4Y/wC50pR7/uP5uR/hj/udKUe/7j+bkf4Y/wC50pR7/uP5uR/hj/udKUnH+V51rY4OHbUxd/FSiD5WRsST0hGUGo1KAt0HaBiCsqd6I7EEogAagszZ7DH3mHq4MGWbX5CSGTJDKI8cp+lXU/cxk9Bb0qcxNZrp9Hl7SfOih2GO8YjxipMk4b9TIwPFRH6m/rWsrmb31hsFrhfcySjGtYdIsSzEiJ0Gkw5N1d77NTMmBlW6ZQAQVAximAfgHWBo+0Sbvd7LVDBysfF18ixieUcUnc35e0CASo9QwuCD9D4rP3fWI9LpddtWzcafJ2EZk9iI8nhQenum9gxPgrYEW+o80tvf9x/NyP8ADH/c6tlVSj3/AHH83I/wx/3OlKPf9x/NyP8ADH/c6Uo9/wBx/NyP8Mf9zpSj3/cfzcj/AAx/3OlKPf8Acfzej/DH/c6Uo9/3Hb/2egO3YOy5hEB+gIbbgOlx9KWI9aPf9x/NyP8ADH/c6Uo9/wBx/NyP8Mf9zpSj3/cfzcj/AAx/3OlKPf8Acfzcj/DH/c6Uo9/1x9EegO3YOy5x2H6A7F7B0pR7/uP5uR/hj/udKUe/7j+bkf4Y/wC50pSZuGSLm0gXTik1uHmLERVqDVhLSCjJiqiZYpXZ1HAGSEpkkNxKG/aOoHs2R2LF08k3VcaDL3YZeEcz+3GQWHMl/FiFuQL+TU71vH6/k7dIe0ZE2Lpir85Ik9xwQv2ALY3BbwTbwKUaWQHgppCrGtyKimmKpCrnEpVRIUVClHbtKU+4B9LU3GWKAyACTiLgegNvI/0Pi/1qEcIHIjJMdzYn1Iv4J/iK+/f9x/NyP8Mf9zr7r5o9/wBx/NyP8Mf9zpSsyIyBGPlHhXq7Fokz3Ksqm6KsRuqQBOqm7MURBsZNP1hA+wgHaPZrpXJxnDlJIyIiQ9mB4EC5DWP2kDyeVrDzXc2PkIUDxyAyAFLqRzBNgVuPuBPgWvc+KV0dNw0u3SdxUrHSTVYBMi4YvW7pBUoCJRFJVFQ5FAAwCA7COwhtpj5OPlwrk4kkcuO36XRg6t9PDKSD58eD60yMfIxJmx8uN4shfVHUqw8X8qwBHjz5HpW0313V00aUpH2C1ex3RGiTcjg/dAoqJzmIBOsfUKAAA7iIBvp5pSOmciTqERJrQcKwfTKTFypFM3bw7dq6kCpiLVByuHT3SCquwGNuHSHw6wNpJsYtbPLqY0m2axMYkduKPIB9qs38qk+CfoKztYmBLsYI9rI8WsaVRK6DkyRk/cyr/MwHkD60hQzzON3FVgFqmwlbc9MwJdoqEmkjlpiTtAiqkouRUqqzuNKc2xRIO+3aI9oap/8AzDYQ5Gu00mImR2SYxjPhgmU/sQ6gmVgbs8VzYEev1PkVbz1HAlg2G4TLfH65EJDgzTwsP3pRiBEpFgktvJB/Ow8GnN9/3H83oj9MFj/udX/+FUKj3/cfzcj/AAx/3OlKPf8Acfzcj/DH/c6Uo9/3H83I/wAMf9zpSj3/AHH83I/wx/3OlKPf9x/NyP8ADH/c6UrDkcnBFR76UesUyM41m5fuzkMsqcjZoiddcxEiEE6hippiIFABER7A1iZ+Zj67Bm2GWSuLBE0jkAkhUUsxAHkkAHwPJ+lZWDh5GxzYdfigNlTyrGgJABZyFUEnwBc+p8D1rHxXdGN5SmbRFFEY2cdxjlkdRJZuoZuMWgmU50FykVTOIk9BgAQ1j6bb4O/1UG61jM+vyYw8ZKlSVP1KtYg+PQisjcanO0O0n02yVUz8aQpIAwYBh+DDwR5HkU8WpOo2kTfciVDGcGaw3OZRiI7vQboicp1nLxyYomK2ZNECncOlxKUR6SFHYO0dtV3s/a9D0/XHa9gyFgxOXEepZ29eKKLsxt5sB6VYOtdW3vbtiNXoIGny7cjawVV/8nY/ao/MmtbjjLNDyvHOZGkTZJMjJQiT5qqiszkWJlAEUvFMXRE3CRVQKPSbYSm29OsPqXees93xHy+uZImWNrOpBSRCfTkjAMAfobWP41mdr6T2XpWUmL2LGMLSAlGBDI9vXi6kqSPqL3pyNW6qnRpSmQ5Eci8Q8VsVT2Zs32olSokAvGsFHacfIzUvLzc29SjIGtVmuwrV9OWWzz8kuRuyYMkFnTlU3SQg9ulKoozh55bSmZX5Gy+NIS72XFuEvL+d5rWxVYcC5FruX65yDkMpSVTqnxi1yXi4241vHbiDMzcul1GhGiTTrcd+AAOylWW+WFmbMfIDjXHZXzTkGfvtltUg1epFn+M9g4vhWSOIKJfPICDqtnkJGXt1aavXp/AzxzgSQS7S7gG+lKhxkDzP+U6FZ5LcrsS4DxBa+C/EjLdzxfkNaeuNvZciclRWIZxCvZpyPi2Pjo1eisoKoPzORYMpAVXMukwVMRRITphpSrwa/YYu3VaEtcGv4qEs8BG2GHciAF8TFzUcjJR6/SAmAO+aOSG23H06UrT1b/wDG/8AAlP4lXSlRa8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/8AS6z6Uq1nSlaObrFaspEErHXoOwJtTHO2Tm4lhKkbnVL0KnQI+brlRMoTsMJQDcOwdKV0e59S8RFu/deu+Kg0U20K69iRniIdul2JIRS/he8j0Uv8kqQkKHwBpSsqQrlflkXreVgoaTbyRkDyKEhFsXqL87UoEbHepOUFSOjNyB0kFQDCQOwNtKVsGbNpHtUGTBq2YsmqZUWzRmgk2at0SBsRJBuiUiSKZQ9BSgABpSoRRX6yq8/3H8X/ANe+WdKVObSlGlKNKUaUo0pRpSjSlGlKwJSPby0ZIxbsVQayTB5HuRbqmQXBu9bqNlhRWIIHRVBNQekwdpR7Q18uodCh/SRb/rXdjzvjZCZMVvcjdWFxcXU3Fx9RceRVfNMwSHHnj/L4Y44yThtK12Lup8aSeVpqUuaTW2zr6Sl2Li1Sa5VJOShkJp7udMCiYjcOgoDsGozH139r1RwdUbSKrcDKS/3MSRyJuSLn/p4FXnc90/5331O2d/jVsWeaAZaYUaQEwRKsbCFBZFkMa+CfV/JpoKHmDL2C2dNhudWQMVvbjmK/VfG2ITYZp1sJGvbW+hwUlY2dMuzMVkm7kx6m7lQqSBEtwMIdgFwcbPztasadklgORkSrHF7SPYsR5B8ePPoT4Aq0brqnVe6S5eX8LYOzTU6nBmy879/kQF1hWSyPHZrsQnhkF2Lel/rNOYmIivRryZsEtGQUPHJ99IS8y/axkWwR6yp988kHqqLR
ql3hwL1HOUu4gG/bqwyOkSmSVgsY9STYD+J9K0/iYmVn5CYmDFJPlyGypGrO7H1sqqCxNvNgKzEF0HKCLpssk4bOEknDdyioRVu4QWKVRFdBYhjJqorJmAxTFESmKO4Dtr6BDC4It/3/ANa63jeNzHICsikggixBHqCD5BHm4Pm9NDjrBdGxdesw5Eq61nUsOcLJG2m7km7LIzUQlKRTA0c1LWYp4YzWuMRQOIqItwAhz7D8ABrAxddjYeTPlw8/dyXDPdiRcC32g+F/MCrV2Dum67JpdToNkMcYOmx3hx/bhSNyjtyPvOv3Stf0ZvIFPHrPqpUaUo0pRpSjSlGlKNKUmIi0oTE/Za+nFTLNWsqM0lpB8z7iMkxeJd6U0Q56zeLIiHYoOwdJtV7Wdii2m6z9KmPlRSa9kDSyJxil5ryBhe55gejeBY1P7Lr8us0+BuXyMWWLPVyscb8pYuB4kTJb7CfVfJuK+iU2rEfWGR9hMFHdsTbo2U7hMzlOZSaEEjdJ63cGUbnIkURDYCAA/Dvr6Xq3XVy87O/Zwtk7IKMosOQnCCyB1YlSAPpax+tcN2jsDYmFhfu5lx9aWOKFIUwlzdijKAwJP1JJH0pQIIINUEWrVBJs1bJJoN2yCZUkEEUigRNFFIgFImmmUAACgAAAamoIIcaFMfHRY8dFCqqgBVUeAqgeAAPAA8AVCzzTZMz5GQ7PO7FmZiSzMfJJJ8kk+ST5NalywmFZ6MkW84LWFatHaMhA+DRUCTdLdPhXYvTfZUPCdodJew2/bqNyMLaSbnHzocsx6mON1kx+Cn3Xb9EnM+V4fgPWpODM1kemyMGbED7WSRGjyObD2kX9aCP0bn+J9LVu9S9RFGg9bU/P6Vo69Oo2Jis+RYSkcVF+9jxQl2ZmTk52SwomcJpGMYTNVhDdM/8AlBqI0m5i3mI+XFDkQKkzx8ZkMbkxmxYA+qN6q31FS+608uky1xJZsednhSTlC/NQHFwpIHh19GX6Gtso4bonRTWXRRUcqd03IqqRM66uwm7tEDmAVVOkBHpLuOwakpJ4YmVJXRXc2UFgCzetlB9TbzYXqNSGaVWeNGZEF2IBIUel2sPAv4ufrXdrt/h6V1f96QOR7mrRoSPlkWzZ0d5YYaEMm7O5ImROUciioqUWqSxxVTKG5dw6R+HVM7z2mTqOph2MUccjS50EFnLAAStxLDgGNwPIFrH61cekdXj7ZtJtfK8kaxYU84KBSSYl5BfvKixPg+b/AIU4Bt99hER27A3ER2D6H0tXQk+h9BVNFvWvnXFc0aUpP2Gz1+sN2ytgnGEESTXGOjnD9UEiLSCpB7pJLcBA6pREDbej6OoXedg0nX4I5N1lw4i5D+1E0hsGkI8Kv4kev4fjU1pNBut9M6abEmyzAnuSLGLlYwfJP4A+l66q22koWKjomzWVKwzxjOjDJrIt45eSKKyixCosUzbG8G3MUg9AD2F3H066tDBnanWwa3f56524Jf8AqsqxNKORYWjB/kUhTYfS5r73s+DtNjPsdDgNhagBB7Ss0qxHiASZCP52BYX+psKUup+oKtHZXM8zgZNzV45pL2BFADRca/c+DZvHAqplFJw6AQFEgIiY2+4biABqI32RusXT5E/XYIsndKl4YpH4I7XHhmuLDjc3/EVLaKDT5O3x4OwTy42mZ7SyxrzdFsfKr5ub2FvwNa2s1VnCu5qwAR2jNXA8dJ2Jsq/VesWsmg06FUIwh/VbtklFTl9X6sAAdR/X+u4uqysvcgSrtto0UuSpkMkayqlmWIHwiAsR49bA1nb7sGTtMbE0xMbavWCWLGYRhJHiZ7hpSPLMQFPn9NyKV2rPVbo0pRpSuQHYQEPgHfXINjelJqu1WLq4zQxZnxvb0w4nH/jnqz3pfOikKqDTvhHwrXYgbJl9UNQGi65ruunLOuMxOZlNPJ7kjP8A1Htfhy/Snjwo8Cp3edi2HYBijYCEfs8VceP241T+mhJHO36m8+WPk0pNT1QVGlKTzuajHb2UqbCdaNbWWGVekaEEFX8cg6KKDSWO1MHSdFJwoUxQEdjCGw+nUHk7XAy8zI63h5kcfYxiGQIPukiVhxSYofVVYgj8fQ+tTeNq87GxIOxZmJLJ105QQufEcrIeTwhh5DFQQT9PUeldtaj5iKgYyPsE2NjmGqBk302ZomxGRVFVQ4L+FS3TR6UzFLsH/q67NBhbTW6fHwd1lnP2kaWkyCgj903JDcB4XwQLD8L1177N1ex28+bpcQYOrke8cAcye0LAFeZ8t5BNz+Nq2b16zjmq76Qdt2LJqmZZw6dKkRQQTIAiY6iiglKUoAH+H4NSGXl4uBjvmZskcOJGCWd2CqoHkkk+PSsDFxcrOyExMON5cqRgqoilmZj6AAeTTKvs41uZbO4/GkrCWO4FkGUfFRMsu5iY+XWXXDxBI9+qRJN4dJoU5ygQ3aIAHwhrVGZ8vaHaQS4XQcjEz+ziaOKGGZmhjnZmHIRSMFDkJyYcT5sPxraeJ8S73WTx5ne8fLwesmGSSWaFVmkhVV+0yRrcoC/FTceASfpX3HXvGyN4k8Zx8eb3gspZJ9cVGSf/AGQnJFjTHlUZF6s4IcVvDAYgikUSl9AiA6+sDuHQYe35HQcGA/3nPEsmaUH9ESiK8yyuWBvxuPsFh9SDeuM/qPepepY/e8+a2nwTHHhCQ/1jEZbRNEgUi3Kxs5ufUAilKNZWi4KtoYumGsBX697Rdkh45FOVY2FuZFdVGLSeqrqmRKeRETCcpxHcwhuGp5uvy4GnwIPj/Kjw9HgmVxBGqzR5S8WKwh2YlR7vnkGNybVApv4s/b503fsaTM3Wb7aGeRjDJjNyUNMUVQGIi8BSvi16QNUyBl2ZGvybJCVLeqc6SjrpVnbtOLprhhOrORVdnSI7XK4mIdkikYqImASmOPZrXum7D33sK4eZHjzDuOtkWLOxWZYcNo52bk/2ueU0EYQiO4N29PWtgbjQdE0DZeFJkQnqGyjaTBylVpsxZMdVsnlV4wzOzAyWsQo8/Wpho3yQIAlWbNl/SAHDrRMI/AIlDcu2vQRAB8el60F5t59aZJxK3xgjfZ+xJx9jBu4eSdSiYQqjeQXim6CipI1+s4TEgyBzFACmL1FAA/wapwyu16nE2+z2iQZsULPJhQY4KytCqkiKRmFjKT/MARVwON1Xa5ep1uqefDlmVI8yacho1mZgDJGqm/tC/obE0jMNTtuslcmbrbnglZT8o6kK7C9CZ/d6DalUILYVkkklXBzqFNuBwE4CT6e2q18V7js2+0eV2vs0tsTNyHlxoLA/tsdLjhyChmJN/DDkOPg+asXyfqet6PeYvVetxXysPHSPJnuR+5yHseXEkhQBaxU2PLz6UjbJPRDW70jMcUg7LS1ICfYW+0NYk5x8KVdJtGN36KhAkCmF6QCkEifUIB6enVX3u51uP27U/KWtSUdVOFkR5uWkJ/QGCRLKpHufrFlKqSbDzarPo9Ps8jqe1+Mdi8Z7SM3Hkw8VpgPv4lpWjYH2/wBBJbk1gT6XqSbdZN2g3coCJ0X
SKLhA3SICdFdMqiRukQ6iiJDB2DsIa3zDLHkRJPD90UiKy/mrC4NvXyDe30rRc8T48zwTeJY2Kt+RU2Pn08EVwg5bOinO2cIOSpqnRUMgqRYqayY7KInFMxgIqmbsMUdjAPpDXEU8E4JgdHVWKkqQwDD1UkXsR9R6j61zNBPjsFnR0ZlDAMCpKn0IuBcH1BHg13a7a6qNKUaUo0pXAgBgEpgAxTAIGKYAMUxR7BKYo9ggIfBrggMCpAINcgkG48EUqsZEIm5shCEKQhZhiBSEKBSlD2a37ClKAAUP2tFVVHFAAo+gFgP9BRmZjycksfqTc/8AWnn1zXFQp5mYeumTYGrStLaqzDqrOZIXkAiqUjh23kkmwA8ZpqGKm4ctTteno3AwkUHbftDXnj/IHoPYe46zCzOvI2RNhPJzgBAZlkC/coPhmXja3rY+K9A/Afe+v9Q2Wbh9gdYIMxI+MxFwrRlvtYi5VW5Xv6XHmkdwvwjf6DLWa33OMc1tGTiUYaPhXpyleujFdpu1X7tqmc4IEQBLoT6h6jdZuwA1Af49/HPaOr52ZvuwwviRzwCJIWI5t9wYuygnja1lv5Nz9Kn/AJ/+ROs9nwsTRdfmTLeGYyvKoPBftKhFYgcr3u1vHgeb1YPr1NXl+jSlVb+ahhnLWRKBxpyth2hSGYrBxJ5c4o5MTeEYh1GtprLNOp7Gz16x12uBMOGkO8tMUztftaMbOVU03LyPIQFCHMUdKVVbn7H/ACo5ZZ/8wXNcBwozfiqmZO8o+dwBiUb/AAVdiMi5OyWnkeWlz1qarsPYZkISdBB8BI1q5dCqsyKKo92UxSipVztM4fr2E3GvKVizZylx7ZMZ4sxHEymHKbltxWMRyUrU61Hpv2F1x+lEvEZddw7E6D8PEk74qYF3DbcVKqBteKOYmJeMvOHyyqNxNynkGY5I5tzo5wLyPhBqY8e4nEXKG3K2KQsmUbO+sbSbq1hxO0nZFN5G+z11pJRuj4YRBXcFK9HuNaUjjbF9Bx23cC7QoVDq1MRdj1bukqxX2MIRyPUAG+zlZdXb9HSld9X/APAMb/wJT+JV0pUWvLa/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/S6z6Uq1nSlGlKNKUaUo0pUGYr9ZVef7j+L/AOvfLOlKnNpSjSlGlKNKUaUo0pRpSjSlcD6B/aH/AKtKVEe55Bo1Hla8wuNsgqy+vNoNVaa0mX6TNezWVcF3CMHDJqDu8kVUEjHBMvbsH7WuibKxsZkWd1RpG4oCbcm/AfialdZotzuYsmfU4s2TBhQGado1LCGIEAySEfpQEgE1tXruCI/i42SdQgSrtRdaEj5BeO9pOlmRAM6Wh2bo/i11maZt1DNyiZIo7mEAHXYzRBgrleZPgG17j1sD58flWHDFmmGTIxlm/bKAJGQPwAbwokZftAY/pDGx+l6YHIGLLdl59mjH+X4nG9+4z3KgwcbVMeKNpdla5C1s1AkJtG5TKLtu3PAPJFogLXw5k1EukBMPYO8bk4U+c+Ri5whk1EkQCp5DFh5PM/gSBa1XnRdl1XVYdRveqS7DB+RMTOkebKBjaFYW+2MwRlSRIqlufK4a/j8kTVsS23kNxCDDHK7HLPDb+zRJqhYqHhu/u1W9fqtalmpailW7y0Xfu0FlIqJbCr0nOYgCZMfSOsWHCn2miGv3cQgZ14MkTn7VU/bxfyfIA/8ArUxs+0avofyr/wAv+Mdg+2hxpffiyc/GUGWaVG98y4zBVIDu9rgX8MKldWoBhVK3XqrFGdGi6zBxFejDP3Kj1+MfCR7eMZC9eqACjx2LZsXvFTbGUPuYe0dTcMSQRLCl+CKFF/JsBbyfqfHrWsdjnT7PYT7LJ4jJyZpJX4gKvKRi7cVHhVuxsB4AsBW612Vh0aUo0pRpSjSlGlKNKVjnetCOEWKjxqR4smdVuyO5RK6WRT37xVBqZQFlEiCA9RilEA+HXS2XjLOuI8sYynUlYywDsB6lUJ5ED6kCwruXFyGgbLSKQ4qkBnCkoCfQM9uIJ+gJuayNd1dNGlKNKUmYuLsLWxWGRkbH7RgpIGQQUD4JND2D4dICvB8YUwneeMUDq9YA6dV/X6/d428zs/Ozvf08/t/t8f2wv7bitn+8eX5nz59Kn9hsNLPpMLCwcH2NvB7n7jI9wt+45G6fZ6JwHjx6/Wu2DmJKVcz6L+vP4JKJllI+PcPFkVU55mQnUWWZAkACk2UHsAptx12afZ5+xnzIs3Bmw48bJMcbSEEZCAXEyW9EPpY+a6tvq8DXwYcuHmw5kmTjCSRUUg47k2ML39XHqSPFKEREfSO/7epwkn1qFsB6UkLfRq/eEYlGeSdm9iSqEzGrsXZ2TlB6gIbfZiAYTIKgUAOTb1gD06rHZ+oaTt0eNFuVkP7TJWeJo3MbK6+n3AElT6Mv1FWXrXbd11OTJk07RgZeO0MqugdWRvyNvuW5Kn6H6VqUqEeAqszX6LOSFffysmvLpTUib26syfPXKCz3u0nPQUW6iSQkIT0E6tw1HJ0xtN1zK0vT8ybCzcnIaYTyn9w0cjurSWD2+1gOIX0W/ipF+4ruOxYu57diQ5uHjwLC0EY/brJGiMqXK3+4Ehi3q1rGl+RL7Agk5EjpRNJEFFVEyCCq6ZCgdwCZgEpDHUATBt9Tvq5pFeFI8i0jqq3JA8sALtb0Fzc/leqc8n9Z5ILxozNYAnwpJIW/1AFh+dq7tdtdVH/o0pWqnJuNrkS9nJdY7eNjkirO1iIrODppmUIkUSooEUVUEVFADYAH06jdxtsDRa2Xb7NzHgQKGdgpYgEgfpUEnyR6VI6jU5282UWp1qB86duKKWVQTYnyzEAeAfWkxIwr+x2OLcSDSsS9BSiSP27OTjfFTaViUMVVnINTuCCk3blamDcNgOBtV/O1Obvd7jz50euyemLjCRUli5zjJJBSRCwKqoS1/Aa9T+FtMPR6TIhwpNhjdxbIMbPFLwgOMAQ8bhSGZy97eStqUryBh5CUipp4wRcSsGDkIl6p1d6x8Yn3TrudjAX7Mn2DuA6n8rTavN2ONtsqFH2OHz9mQ3vHzFm4+bfcPxHioHG3Gzw9fk6rGmdNfl8PeQW4ycDdeX1+0+fHrW31J1G0aUo0pRpSjSlGlKNKUaUo9Po0pWrnJmPrkRJTksqdvGxLVR4+WTRUcHSbpbdZioIlOqqICIdhQER1Hbfa4Oj1k+42bFNfjRl5GALEKPUhVBJP5AXqQ1Orzd3s4NRrVD5+TIEjUkKCx9AWYgAePUmkvIHcLBBW2k1uFmZGdPDoSEpICWLfJ1B2UHJ3BXByEcrHbEMQxG5vhH0dmq9mvPKMPs3U8HEys/MaBZJpf6Ugwn+8sGIDEqCpWI/X6eKsGGmPEcvrfas7LxcLEWZo4o7yxnMT7QpUEooYhgZAP9aQ7a9qQC9jRj4DJNvfvrw9jGzJ+wKi3anSRROr7KeKFBFGvERETJHNv1nAQ7NVCHuL6aXOjwcPfbPMm28kSJJGFRCFUn2XIsuMBcqx9WuKtk3UE3EOFLm5mi1mJDqY5XeOQl
nBZgPdQXZsknwyj0BBrQZMUpbM+QrbMVewTMtEwEHBOYuSO5Tq814tVF7HpR6KZikcqs3CQC4VTETFAo9m2ofv8nU8Z932Xaa7NytljYWPjtFKXGJPzKyRiMAgMUZQZXUkgA+LXqY6HH2rKTS9a1eww8XX5ObPkLLEEOVBwUpIZCblQ6sfbRrAkjyDSIqENka3WyMexjKrUmgMkGlhrjuuwsPMwqyxHKCT1Bi/7tN03lpBIqpFTh0mTAmwh8Oqj1nU957R2THy8CLXanpUSJk4r4sEE8DEOqyLHJYOs0g5h28FbWtVs7Ltukda65kYmdLsdr3KV3xslMmeaGcKVYxtJHco0MZ4FF8hr3v9Kfqv4jqkI5PJu017DOmczygT0sYPaQNbEJwfR6hm4pJLoAicUymOBjgX4Q1uXR/GnW9TOc/KD524L5B/cTf7vHJv7kZK2DLxJVSwJt9RWntz8kdi2sAwcZkwtSI8cft4f9rljWMcgDXKtyHIhSAT9DW1qFAj6S9lDwklKEhHqDNCOqyq5lISvFbdZ1RiE1DHVTM9VUE6omEdzDqQ6x0vC6llZDaqfIGolRFiw2a8GMEuT7IJJHuElnufJNYHZe5Zna8XHXawQHbRO7S5aqBPk87ACYgAERgAJb0ApQRFchIFeZcxEeixXsEiaXmFEhUEX0icokM6VA5zgU4lH0F2D6WpzWaLUaafKyNZAsM2bOZpyL/1JT4Lm5Pkj8LCoTZbzbbiHFx9nO00OFAIYAbf04gbhBYDx/G5rd6lqiqP2fs/x6UrgpSlACkKUhQ7AKUpSlAPoAUoAUA1wFVRxUAL+A9K5ZmY8mJLfiTc/wDWuATS6e7FJIUg/wDhGTIKWwD1bCmId3tv27benXHCPjw4qY/wIHH8fT0tXPOTlzDN7n4gm/8A19aQQY+YM6xN1iCmrDBFnJF1KqSzWSO4l2Dx44RcLmYOFwHwyB+46QTD1SkMIB6dU0dJwsXr+X1/T5ebhjLnaYzJKWmjkdlZvbZr8V+2wUeACbVcD3TNyd/ib/cYuFmHEgSEQvEFhkRFZVEirbkwvct6kgXrdQ0LXaTFi1ZeDiWa7vv3Th06TQ8dLPOkq7pdZyqUhnr9YvUIAIdRh9UNSuq1Wj6nr/22L7WNjPJydnYL7kz25OzMQDJIRcgep9BUVs9pu+1Z/wC4yvdyclI+KKilvbhS5VFVRcJGDYE+g9TXB5uVLb0a6FZkDwykMeSPbQWTCMQfEWOmWGMh0d6Lo5CgcDdW2w+jRtvsV7Muj/t851RxTKczkPaWQMR7BW1+ZA5Xvax9K+l1OubrT7ts+EbRcoRDDsfdaMqD74a/HgD4ta9x61v/ABTUHJWYuW4PDJCuVoK6QOjIFHpMsVsJ++MiU3YJgL0gPw6mTkY4nGKZI/3RXkE5DmV9OXG/Ljfxe1r+L1DDHyDAcoRv+1DcS/E8Ax8heVuNyPNr3tWps9gRq0DJWBywkpJCMRKsowiGpnkm4A6yaPS0alEDLHKKgCIfAUBHUZ2DdQ9d02Rup4Z8iLHUMY4F5ytdgLIn8x8/9ATUloNNN2HcQaaCaCCXIYqJJnCRLYE3d/QDxYH8SKI2ywsq7GNavkQmEothMPIVQ3RKR7GTSTWaKPWo+sgJgVAogI9hg21zgb7VbHJ/YY8yf3RceKZ4CbSxxyqChkT1W97fxrjO0W01+N++yIm/tjZEkKTjzFI8TFXCP6N6X/MVvf8AF+1uG/8Ai9Opj8vrURSnxsskEhZEBUICxpZkoVMR9cSFjW4CYA+gA6+DJGJBESPcIuB9bCvsRyGMygH2gbE/S/4U9OvuvijSlGlKNKUaUo0pRpSjSlGlK61v3pX/AOWf/wB0dKUjKv8A+AY3/gSn8SrpSoteW1/YN4pfiZqX8lNpSmC8jv8AVO8IPxRH/pdZ9KVazpSjSlGlKNKUaUqDMV+sqvP9x/F/9e+WdKVObSlGlKNKUaUo0pRpSjSlGlK4H0D+0P8A1aUqrLkfx+uea898WrP31ONibBmQ7Dk22MZJeaa3Za5NY5ZlS16urHpmjV41BwqJnyDsxAOUodO/aAwW01k+w2WFNeP9jjStIwNw/K328beLD+a/rW2Oh961HT+mdn11ssdo3WFHhwMgjbHEDOGyBMGs4cgWjZLkH1tSypXFPCtGssXc2VflJ631+65AvtXs1vsc3ZJirTeTiop3BvALvngkZQz5FuUiTQSHSQLv0bCIiORj6XX40wyFRmyEkd1ZmLFTJ+q1/ofoPQfSobcfJ3cNzr5NRNPFBqp8PFxpoYIo4o5o8O5gMoVfukUklnuCx/VepGlKJjAUobmMIAUvwmEewAD6YjqVtfxVAuB5PgVWW8nqvIcqqpyFpXmDM2OJpy+HwFdeOsi7YTNHteVICJOza0GlvFOlSs3Usk7I8kCEIKy6hSF6+gdtVEyRNuk2mPtAMJpPZeA2KNIosET/AMXuQT9T4/hXoqHC2UHxnldE3HRHfs8OENnj7VFePIhw5H5HJyF9JsfipSIk8VFza4vVmggICID2CHYIfQENW6vOtcaUo0pRpSjSlGlKNKUk5KWnRma+2rbCJmYZWRdNLe/GUTK5gEkkiGQFBsmcfEOjqmEDpm9YgAAiHbqtZ+y3J2mFBoocbK1TTumbJ7oD46qAV4qD9zlrhlPkePFWTB1unGszZt5NkYu0WBHw4/aPHILEhuTEfagFirDwfPmkta5/FlevVefW12yjLinCvywMo+B4mi3iFllUXiQuiiDBMV1jGKBVAE4iPZqvdk3Px1o+34Ob2aSLH7QuLIMeaT3AqwsxVxzH9McmuAGBJJ8VP9d03yFuuo5uJ1uOXI6w2VH+4hj4FmmVQyHgf6h4rYkr4FvNOoAgYpTFEDFMAGKIdoGKIAYDAPwgYB3DWxQQQGHoRf8A0+n/AFrXpBUlT6g2NfQAI/8Ao1zY/SuLikahe6+rcXVGWVcMbA3QTdNG71EzdKZbmSOsstDKiI+OTZlIILCGwEHs7dVaHuOlk7PL1CVni3SIHRZF4idSpZjAb/1AlvvP8p8VZ5eo7hOtRdsjVJdM7lHZG5GBgQFWcf8Atl7jhe96wVa9dzw7NmS+d3KI2MJJ1LexWu7qBBc6o14G2/QnugYCd+HrerrDk0nb21kWIm5A2K5/uvN7Cffj8if23H0H2kL7nr4vWWm76mNlLkvp+WvbB9pIfff7MjiB+55+p+4FvbtbzanAHbfs/wAXp21dTa9x6VTRe3n1rjSlGlKNKUaUrku24b+jXI9fyvXBvbx62pucae+Yw8x77rvF5ELRMljjvmrNqqEGRYhY0qZGRhIdAUwESnN64gPbqi9APav7Zlf8teZ87+4TiIyKiH9uGtFYJ4K29GP3H61eO+Dq42WL/wATSJME6+Ay+2zuP3BUmW5cXDX8EDwPypxDFKcokOUpymDYxTlKcpg+gJTAJRDV3ZVccXAKn6HyP9RVJVmU8lJDD6g2P/UVz/0fSDsAA+gAfAAa+q49fJ9aNKUdv+D6P/6NKUaUrRKysgSyNYUkC+Ui14td+rZAU
RCPbO0lippxaiIj34uFyCJgMHq7B/ih32Ocm+j1IwpW1747SHKBX20cMAIiv6izDyCPFS8euw30cm0bMiXYJkLGuLZvcZCtzKG/TxU+CD5re6mKiKwpJ0uyjn7xqyVknLRm5ct45AxSrv10ETqJM0TG9Uqrk5QIUR7AEdYmfkS4mDNlY8TT5EUTOsS2DSMqkhFJ8AsRYX+prLwMeLLzocXIlWCCSVVaRhdY1ZgC7AebKDc2+gpEPbsk3q0E8sBHlHm7iJIWLZO2xpN1EWGQIsRkg6K3TFIxkDFBQROBUx9A6qOV22KHruHlbsS6fb7QiCKN0Mrw5MgYRq4UWPEgNc2X6E1a8Xqsk3YcvF0zRbbVawGeV0b2kmxoyC7IWNxyF1sLsPJFKyvspWOhY1jOS/t6YatwSkJjwxGXtBwUxupx4VL7GhuXYNg+hvqzaXE2OBqoMPb5P7zZxxgST8QnuNc/dwHhfwsPwqubnL12dtJ8zU437PWyPyjh5F/bUgfbzPlvxufxrcak6jK0tjh1Z+ClIVGWkIJWSamapy8UoCUjHmMYpvEM1B2AixenYB+gI6it7q5N1p8jVRZM+HJPGUE8JtLET/Mh+jeLf61K6PZpptvj7WXHhy44JA5hlF45LfyuPqpvWE/8fW6euDNm+t8lDQhEW7NYyZpCwuWiJEiguooApmcOzh1HEdwEd/h1h5n7vRdXZcWKbaZ2LiAKjEGXJdFAHInwWc+WJHk3/KsrE/abzsynJli1mDlZZLOtxHjI7EniBYhUHhQD48fnTct8gqWyKlqLHSzCmZiShCmNCqprKJQkks3I6TK1UXbnRfEatjFFUCFP3Qm2EB21R8fur9l12T1DAyYdV8oriD+gQSIJSoccSylZAiEcwoYITYg2q7z9NTrmxxu25uNNtPjJsu3vggGeIMUPIKwaMu4PDkV52uLXrLseSTU6qqNCLFut7hG0VFTLKOATJN5t2070JCfBEERiYdbujqHV6diF+AA137zvh6t1w46ONt3DEjhhnSIeFyHS/uZHG3swNZmL/wAot4FdGk6MOz9iXIZDquoZck00LyeC0CPx9vHvf3pluqhL/cfqa0ELOz2SHdZZ26pUZ5VH4LzbV4ytRZRUzuOS6WriPjwOko+QB0ZVNQek5AKA9X04jU7jc98yNfjdm1mnl63NynV48wTEvELI0cYIMi8y6sbFQB935y+11On6LjZ+T1rZbeLsUPGB0kxPaASU3dZJLERtwCMouGJ/T+TtVert6qlJtWT52uwfSSr9lHLFQTZQaKpQ3jYhBummRBiU+5gLt9UI62T17r0HXEyMfFmlfDmnMiRNxEeOrf8AtQqoAWMG5t+JJ+ta53/YJ+wvj5GVFEmZFAsbygsZMhgf92ZmJLSEWBP4AUp9WCq/RpSjSlGlKNKUaUpH3ioEvEEMCrNS0Egd8zduHUMsCDpwi1OYx2Cim4CDZ11evsO/YG2qv2/rC9v0/wDZpMvJw4jMjs8DcXZUJJjJ/wDB7/dbz4BqzdS7K3U9v/eI8XGzJRC6Kky8lVnAAkA/8k+n08muyKpcPD2SXtDNWTNJTUfGxjtNzILOWJG8UmRJsZq1U9VFcxUw7w+4ice0fTr713VNXq97k9hxmyDnZcEUTh5C0YWEAJxQ+FYgfc3kt6nzXxse07PZ6PG6/krjjBxZ5ZUKxqshaYktyceqgn7V8BRa3pWpUq0haDWCNyI1gJquFsDGTqDNkV4g5bN2JVDoKy5yGQ718i4EBL0mMQQ331GydezexNm4Hd48LL0P7yOXCRA4ZVjuVMxHG8isfFiVte9SSdhwuvjDzukyZmLvDhyRZjuUZWaSwYQg8rRlbg3APpathPXRpHpWBpBNveu119m1fOahGOUiTB0XqyREDCVUBIkVQinWAjvuAejWbue1Y+FHm42oT+5djwokkbCiYCcq5AUm/gAg3F/oKwtP1bIzJMLI3D/27rubK8aZsqEwgxqS1reSQRxIHoTWxQgId5MMLk4hyN7OEKSNB2sooZ2yZOgI6cRh+lTw5u6XMIGN0iO4DsO2s2HS6vJ2kHap8UJ2EYgi5knnHG9naI2PHwxsTa9x62rCm3OzxtZN1eHJL9fOUZeAACO6XVZRcchdfIF7WtcXpRbiUd/QIfth/wBXbqd838G1Qh8/nTGN2eQwzpMPWbFi2oqkFEJv5J7FtiOJEybYvUzipRBEHizlB6ICcixxTIQB6Q9GtQw43eB8v5WVjQwx9POHCJJZIVDSkKPshmUc2dZPJWQ8QoNrVtqXK6UfiPFxcmWaTtq5c5jiSVuMd28PNEx4KjJ4VkHIta/1pSu6hPL5ch7wnIIFrjCnvoJzHC6dA4VkXLldVJwVoCYtDpFIcu5xOBwEPR2ansnq+4l+TMbtyTINHDrJMdoub8jKzswYJbgRYi5J5A1BY3ZtRF8b5XU3hc7ubZx5Cy8F4iNUVSvO/MEkHwBxt9afTG5Ege2ZYxCdZZdkUFBKAnKQY1vuAG23AB1fiiFudhzta/1t/GqJzcIYwTwJvb6X/hTxguiOwAoAiI7B6e0f8X09fVfNQn52+YNxy8u3FjLKnIaflWzWelDwVOqNWjSTVzuk0k3F24YwUYq6YtQTZtg613DldBsiUxeo4CYoDD7reYGhxhk5zGzGyqouzH8h4/1JNq2P8Z/FfbvljdtpOqRRl4k5yyytwiiQmwLsAxuT4VVVmNjYeCab3y9vNN4seZTX7VI4DlLNF2iieCPcsc3+Jawlyg2UmdVOOl+4j5KXipOGeKomTK4aulSkVL0KAQwgA4+j7JrewIxwSwlT9SMLML+h8Egj8walflb4U7t8QZUEXZ0hfCyb+1kQMXidlsWS7Kjq4BvxZRceRcVZBqwVqOjSlRu5U8psY8QsVnypk8lllW76y12i0ylUaFPZcgZIyHcH5Yyp0KiVwizX2xZZ56IgkQ6qKKaZDqKqETIYwKVQlyM833P1OzlyvfY9w3yIrRcD+Wm5yu74y5HoFCirRRsyzGVpWHr+Yp5cbE7i7DSYmpnRcO/Zs69QM2bnIDcXAGLpSrYvK7m812/jFB3rPNh5Kzt6ui0bYHP/AOTVcw1VrA2JIVyHeKrUWJwoo5gkMbyTtydaMB8seTBMRBYCiABpSmptebeamLfM/wCM2ErxkXCth4ycoYzk49rFErGMZuMyLTmuFKFA2evu5/IsnanjSZfSj2c6XCTaNbolKlsAjvuClW3LfvSv/wAs/wD7o6UpGVf/AMAxv/AlP4lXSlRa8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/AK98s6Uqc2lKNKUaUo0pRpSjSlGlKNKVwPoH9of+rSlR3df/AK06/wBpcfxx9KV0aUrkBEogYBEBKICAh6QEPQID8AgOlD5Fj6U1DbBGEmZGqbTEeOW5GN5XyeyIjUIVMjTJDrpB1fG4FaACNtcdBet+X/SDbBubs1hDW68eBBFYSe4PtH+59X//ACj/AOXrVmk7r3GZmaXaZ7M+EMNi
Z5DyxB6Yx+7zAPNo/wBHn0p1vT2j6dZtVmjSlGlKNKUaUo0pR6O3T080rSQlbg66MqaFj0mBpuUXmpUUjKG8ZJuQ+zu1O8MbpOpt6C7F+lqI1Wh1GjOS2qgWE5eQ081ifvlf9Tm5Pk/lYflUttN5tt2MddpM0wxMdYIrgDhEv6UFgPA/PzWmkZnHs05k6/MSNUkHcUZAstFSy8aZZmcel03Iui+6RKbsA5QDfbsEdRefs+k7afI0m0n1s2VjFRNDM0RaM+HUMJLWPowAv+NSmFq+6avHx91rINjDi5IYwzQrLxkHlGKmP1H8pJ/MVpmtwWvsMzk8TzEI6RZWRuwnlJdo7IROLbDtINWyHQmok9OkJRQOId2YvaHZqJxuzy9z1UWw+N8rEkihz1jyDMjgCJf9xEWwIci3AkcSPI8VJ5HWYenbSXA+RcbKjklwWkxxC6EmV/8Abdm8goDcSKPuB8etaE0dUMov7bMU+42OMmmfhahKy0I8coJsBjHaUkKTBs4KRqoq4BLoOumIgYgiG+of9l1j5EzNls+s7TPx9rFwwppsd3UR+04ltGjgKS1uLSLfkpIvUwM7svx9ia3Wdl1mDPq5eeZFDOisZPdQxXkZSWAS/JY2A4sATSxqshA3VBnbU6+7ayUSrIQbN7YYkrOdQI1MCDszc6gGWTaPjCJtyiBVNx1aet5um7ZDF2VMKWPOxmkx43yoQmQoQ8XKk3YJITe4IDearHYsLcdVll65JmxSYOSseQ6Y0xfHYuOSBgLAvGPFiLr4pdauFVGjT+NKNKUaUo0pSJPf4EZQkOzTlJJ2WxhV3wsYx0o3iZIWwu+uRXMQhEmXdht3wCYnV2aqbd00x2I1eMuTPkjO/aSe3E5WGXhzvKxAAj4/zi63q1L07bjXnZ5LY8GMcH93GJJUDTRcuFo1BJL3/kNmt5pWvHjSOaOn79wk0YskFHLt24OCaDZuiUTqrKnHsKmmUNxHVkysrGwcWTNzHWLEiQu7sbKqjyWJ+gA9armLi5Gbkx4eGjSZcrhURRdmYmwVR9ST6CktPTk8rX42Yx7HxNsVkHccokC8kVowVg3JjC6km7oDlBYySWxiF/y9V7cbfcS6WDadJgxtlJPLERyl4RmB/wBcqvccrLYqPPL8KsOo1Ooj3M+s7pNka6OGKUHjFzkE6j7ImSxIubhj4t+NKtF01cKLpN3TZwq2UBJymg4SWUbKGDcEnBEzmOgpsG/ScAHb4NWOLIx5neOCRHkjNmCsGKn8GAJKk/gbVXJMfIhjSSeN0jkF1LKwDD8VJADD8xcXrHfSsZGKMUpF+0ZKSbsrCOTcrFSM9fHKJiNGwG271wcoCIFDt10ZeywMB4Y82aOKTIlEcQYgGSQ+Qi39WIF7V34muz9gk0mDDJMmPGZJSqk+3GDYu34KCfWtht9MP2tw39O2s308fWsP/tTbSTEKCFitFcr1gt0rbJ2MXlIhvJd4KRjAdsd4wScEMkyZNEjiZRMu/V2dvZqh5+H/AMMGd2DRYWds9lssyJpoVlva90LxhhaONASWUevir1g5Z7icLr+8zcLW67XYcqwzNFa9rOEkKkF5HYAKx9PP405Pp7ewOzfb6H0v2w1fD+P5VRf+1N3UV7grasipWAj4sC3mo8lOM5bt0Wx40zNQXgsVkR710l4gA6jKdoDqj9Zn7PJ2Pex7sTDTJlRjBLoqqYuBL+2V8uOVrlvI/wCtXXskXWo+v6STSmE7Z8WQ5oRmZhKHHD3Fbwh43sF8W/61u4S2NJ2atEGhGy7ReqvG7J06kGJmzCQUcpiqVWIciYQeoEKXYxgANh1LajsmLuNrsNRDBlRTa6VY3eRCkchZeQMDekii1ifoaitt1zJ1Gr1+2lnxpYthEzokcnKSMKQCsy2+xjfwPqKVGrFVfr4Mmmp0iommoJDAcnWQh+g4eg5OoB6Th8Ah2hr5dEe3NVaxuLgGx/EX9D+Y819K7pfgSLixsSLj8Db1H5V96+q+aNKUaUrEkJFhEsXUnKOkGMcxRM4ePHSgJN2yBNgMqsoPYQgCIdusXNzsTWYkmw2EqQ4UK8nkc2VFHqWP0FZOFhZmyy48DXxvNmzMFREF2Zj6BR9TUZYhzNvcowMwreaXJVaVlJdarTqDOKPNT6KwJpuaCwMQBfJhFkMB1HO+5x7B7BANaA1k+2yvkPD2cm41U/XsjImbEyFSEz5INg2ujI/qAQj7ml9W9D+A31s4NVi/H2XrItRtYOwY+PCuXjs8ogxyLldhID/TPum6rHb7R59b07DqSb++Bounw1XmHLt4m0ysuDpu1mYqNVaAEcq9bgBVJE7hMxilTP1ep+3rZWTnwns513V8XXZWRJKE27c1SeGIp/SLqLGYsCQFa441rnGwJR1kbDs2VsMaCOIvqF4M8E0oe8ojbyIgpseQtdvzpSI0ipNpWFmmsCxaSNdYOoyDWaJi2TjWD0xjumzdqgYjYpFjKCI+qI7j2anIuo9ag2OJtoMOGPOwIXix2QcRFHISXVVUhAGJJPj6nzUJJ2zsk2uytVkZksmDnSpLOrnkZZIwAjM5uxKgADz9KVOrHVdo0pRpSjSlGlKNKUaUo0pRpStHZSWNSCkSVJaMQsZkiBFrTKaisYRXvCd4LpNH7IYoo9QBt/lbah9+u9fTzr1lsdN4VHtGcExA3F+YXzbje1vrb6VL6FtIm3hbsi5D6QMfdWAgSkWNuBPgHla9/peu2PiGbdwMurHxqdifsmbaZlGbUiS707dIn2Mzjp79RqksA92Uwj0l212YWsxMeY7OSCBd3NCizyogDSFQPBb9RRTfgCTYV1Zmyyp4RrUnnbSQyu0ETuSsYYnyF/SHYW5EDyb1ky0Y3mIuQiXgrlaSjJyxcHbLGbuAQdJGRVFuuUBMiqBTj0nD6ke3XfscCHaa+fW5XMY2RE0bFTxYK44kqw9GsfBHpXTrs6bV7CHY4wQ5OPKsihgGXkh5AMp8MLjyD61oPdlxF05Oq1SXcQ67CNSjoeYfl9sOmgImKJFnIL9HjVRKAlETbenf4NQ/9gn13Vl671zJfFmhgEUM0n9Zkt/M3K3uG1wSbet/pUwd9BsOzt2HsWMmVFNOZJoY/wCgj3H6V434C9iAPwt9a7K8jcEXk2WzPId7HAsyLXFI5BVu98Km26XqksBxFIXC7oOsoE9UoCIa+9JF2iLJy138uLLgh0GKYlKycAlpDNfxyZ/I4+APFfG6l6zLjYjaGLJizeDnJEjBo+Za6CG3niq/aS3knzSp1Yqr1KbHf77av+LM/wD6Y30pTlaUqgTz8/K1zH5j+L8PWLj5IwzjKuC5K19xRrJMIwUZc65cm8P7RTjph6JI1hYI57ANzI+KOmgqkooUTlN06ovd+tZe/wAeKTBIOTCW+0m3INa9j6A+PravVf8Ai3829f8AiPdZ+L2lZF0mySK80al2ikhL8SyD7mjYSNfiCQQDYi9MZ/y+/k88iuA1wy3nzk8aCq1yvtLaY6q+Na/YmFnWjoX22zn5ads0rCrOoTxi7mMbpNW6Cywpk7wxzAJilDC6N1X
P0ksubsbLK6cQgINhe5JI8fQWAq0f5UfP3U/lHBwes9M9ybX4uQZ5Mh42jBfgyIkauA9gGYszKtzYAeCa9R2tj14so0pVcPmTccsvZxpHH3IWAYyAtOY+JXJ7G3J2l45tM6nV4DKPuaxsdbsNDc2Vwmszr8jLVq3Ozx7xwUWyD9FIVdibiClVm5p4r+YHyqzNzezTcONVZw/EZo8ruc4r4ZoquYse223Dk1xkCSsRoq8zkI+RgGgy4Pu+auUVFWLVsAJquO9ESlUq2OscFcNT6/HrKmUKvajZtw/jTFlcbuonLmToevRsvRq+xbA3c1GrXSOoNgI0kgWAx3DBwRyUA6xOXp2UqC3LhPnrNeYdxZzniry/7Dk3FPEpryNgPehDklgOouMpNc4UGrVmLl6/X7PPtJmvIQL6JWFwk/TBVUu3QHbuKlXjNV3LmLbuXrM8c8cMEV3Ueosi5OwcrNyqLsjuW4mQcGaqmEgnIIkMJdwEQHSlJir/APgGN/4Ep/Eq6UqLXltf2DeKX4mal/JTaUpgvI7/AFTvCD8UR/6XWfSlWs6Uo0pRpSjSlGlKgzFfrKrz/cfxf/XvlnSlTm0pRpSjSlGlKNKUaUo0pRpSuB9A/tD/ANWlKgByVy5ccJ0f32pGGbZnSTNdYWDkahTnrdhJxNelXTsJu6vHDps5RCFqjZv3zoOkBEhvSGo3a52Rr8UZGNjyZL81BVbAhT6v9fCgXNXXoPVtT3Detp9zt8XS437WWRJ51ZkeVAPbx1AKn3JyeKH0uPQ0v6DkSh5Vq0fecZ2+AvdLl1HiUVaKy/Tk4WRPHulWT0GrxL1FDNnaJkzh/kmKIay8bKxs2EZOHIsuO17MpuDbwfP8agN3od31nZyabsWJPhbeIKXhmUpIgZQy8lPpdSCPxFLHXfUTRpSjSlciAlHYQ2H6A6VwCD5FcaVzRpSuQAR3EAEQD0iAej9v6GlK40pRpSsV8o6SYvVWKBHL5No5UZNlFO6TcuyInM2bqK9ndprLAUom/wAkB3+DWPlvkR4ssmIgky1jYxoTYM4U8VJ+gZrAn6XvWRiJjyZUUeW5jxGkUOwFyqFhyYD6lVuQPra1RgyJh9/kmNqtjm4Wr1Cbbt5qQvpSOTFOZQzE6TERlmzdcXybQzdM5zqD6qe4Bvttrz53n4wzO+YOu3m2xNdq9siZEmxAfzf2ysZ95FYyBOKlix8KSBe1b96T8mYnRs7Y6PVZWw2Wqd4I9fdfFvcBkHtOy+2X5MAFHlrE29ajjjqtX+FeRTVC6SNFqstKuko+4tXJXFPnZtv3jeNBgkYyAP05M6fSn3hPsm3aUNuzRPRdB3PV5WPjw7bI0/XMnJcR5yMGwsidbrF7YPH3FlIsvJfu9CvjxvDu+96dtcbIyJ9VBt+xY2MhkwnUrmY8DWaX3GHL2zEDduLfb+PmnndzEfgrNLP2s6cx9btlUI5tcgDIxYmTsDdESDJxEUwSHwipnrYAVKBTAUVjfAOtqZO1wfh/5Xi/uUjwaHZ60PmS+3/RlyVW3uwwxA8GLqA4ANubW8GtX4+rzflz4tlGtjSbe63YlMSP3B70WMzX9qaaQ/eoRiUNxcIv1FTEjpFpLsGMqwWFwykmjd8zXEh0+9auUyqoKdCgFUIJ0zAOxgAQ16jwc7G2eFDscNueHPEsiNYi6sLqbHyLgjwfP415jzcLI1uZLrsteGVBIyOtwbMpKsLjwbEeo8H6VgMS2MJmZNIqRRoAwM/YCbQi5ZNIQS/0/wBpmP8AYTgK3730egvp1hYY3o2uU2c2MdMQn7YIG90eP6nuk/afu/Tx+nrWbltozq8UYIyBuRz/AHBcr7R8/wBP2gPuH2/qv9aTzdd7Z7BDWOrXdg4psanKRk3BMmyDwkrLJmOmVT2kACo1VjlDF6kyj6wh26g8eXK3+6xN717bRP1fHWWLIx0VXE0wuAfd9UMRtdR62qbmixtBpcrSdg1UydonMUsGQ7MhihNiR7Xo4lHox9AfFL30dv8Ag3H9np1dPz+lU7/vSaq1VjqhHuIyLWkV27iSfShzyb1aQcA5fqAquQiy25yNyGDYhPQUNQHXeu4PWcF9frmneB55JiZZDI3OQ8mAY+Qt/wBK+gqd7B2HO7Lmpn7BYUmSCOICJFjXjGLLdV8FrereppSanqgq4KUpBOJCEIKg9SgkIUonN6NziUAE47B6R3HXCoqXKADkbmwtc/ibep/jXJZmsGJNvS/mw/AX9B/Culy2bPW67R4gi6aOkjoOWzhMqqDhBQolURWSOAkUTOUdhAQ2ENdU+PBlwPjZSLJjSKVZWAKsp8EMD4II9Qa7IJ58WdMnGdo8mNgyspKsrDyGUjyCPoRQ0aNWDZBkxbIMmbRIiDZo2SIg3bIJl6U0UUiAUiSZC9gFAAANcY2Nj4eOmJhxpFixKFRFAVVUeiqB4AH0Ar6yMnIy8h8vLd5MmRizuxLMzH1ZifJJ+pNRFzTLhQWNgtuLbHHRrx1PhD5EimRAeOnk/LtHBWT904XUUNGuo5skcSES6AEwgYfQIa8z/LGzXpmHndl+Pc+DHy5M32dnDGPceTJmRvbkdmJMTxKCVCcbkgm9jXpL4s1jdxzMLrfyBgzz40eH72tmk/pomPC6mSNFUASpKxAYvysARcXFPHiB97346qUjYX8Na5eP3OaSQArszR+gIg3MsddIqiE2g2OUq5gAo9e4huHbraPxhljs3Rtbn7ybF2Wzg8mVbOUkW/EsWF1nVCBIbD7ibePNax+S8X/jXdtlgaWHK12sn8e032B42/UFCkhoGcExi58WvY+KWy9Vi/e9G9KryJJVnArwgIA8UCK9nmVUdKLKR/SJDvCmMOym+/T2batsvXdf/wAmXt8jzjYxYbQBRIfZ9skuWMXp7n4N628VVYuw7D/jTdSjSA66XMWflwHve4AECiX1CW/l9L+fFao7SrZOY1iwtJCSWYQs4E3ErsV3UaVw+jlDtjpPkDkIo5aFUTEBTOAFN9HUa+N175AxNfu8aed8LEy/fhaNmi5SRkpaRSAzJcEFSLG1/SpBcnsHQcvP0uTDCmXlYnsTLIqS8Y5AGBjYEhXsQQwNxelDJMZx1LQzthNljoxn472vGCxScjL9+gJGezk4gZp4Rf1/V+r9GpvPw9tkbLFycLLEGvi9z3ovbDe9yWyWc+U9tvu8evoahcHL1OPrsrGzMT39hL7fsy+4V9ni13+0eH5r9vn09aIBlNRsMm2npgLDKJGdqKyRGabDxJTKqqtkAbJbpkFJESp7/wCVtuOudNi7XX6xYNzlfvtipcmUII+QLEooQeAVFlv9fWm5ytXn7Rp9Pi/stcwQCIuZOJAAdizeTc3a309Kx6lOSdigm0rL12Qqj5VZ2ieDk1Cqu2ybdwdJJc5iAUnQ7IUFC7B9SOunrW42G908ex2eDPrstncHHmILqFYgMSABZx9w8ehru7JqcDSbeTX63Nh2OIqoRPECEYsoJABJN0P2nz6ilJqdqCrnYfoafnSuNKUaUo0pWO7Zs5BquxkGqD1i6TFF00dJEXbuETbdSS
yKgGIombbtAQENdGVi42djvh5kaS4ki8XRgGVlPqGU+CK78bKycHITMw5Hiy425I6EqysPQqwsQfzqNcxHILy0djF8lARM25sjyUqD2gIsmM1Ra0kUj0km5bu0ymScyfdd2sdv9UUB9PZrQ20w4ZtnB8f5S4WNtpM95cF9cI459figBxM6uAQ8tuLmP1Hm59K3rq82WLXT9/xWzMnVR4CRZqbFpJIM/KJKGJGQkFYr8kWT0NvT1qRTKHi2Dp2/aMGiMlIlblk5FJukm9kzNUwTRUfLkKB3ByFDs6hHbfs1vHF1Wuw8iXNx4Ylz5wollCgSSlBZTIwALED0v6VpHJ2mwzIIsPImkfBgLGKIsSkXM3YRqfC3+tvWtlrPrBo0pRpSjSlGlKNKUlwtbQbkFKCOmBfDCjNhKeBH2H3PfFR8J7Q7zb2h1G37vp+p7d9V7/keP/yodTEGV+7OJ+4932//AI/Hlx4e7f8A3L/y29PN6sH/AB3JHWD2oz437QZXse17n/yOVuXP27f7dv5r+vi1Z8fYYaZYvJGFkG0u0YLPWrlWOUBz0PI8omdsti7bukh2ASencQD4dZmFu9VtMSXO1U8eTjQs6OYjys8Y++Px/OPS34kVhZum2msy4sHaQSY2TMqOokHG6SGyP5/kb1v+ANIkmW6t7NiXrhvPs3s5GzUpE19zCuiTrxCBFUH5CtCgZNJwIJCKRDnDvAENvTqpr8l9d/YY2XOmbFl5kGRLDjPAwyHXGv7g4C4DG32KxHK4tVsb437Cc7JxYXw5cTEngimyFnQwI2Rb2yXNiVF7OVB42N617jJMy/nqXC1WnybslgYtJ+deTaCkU3g645OKRzFVFToPOtzFETNDbj07bb6wp++bTN3Oq1XXNXkSpmwpk5EmQphXHxWNib3schSDeE+bWtesuDo2rw9PtNr2HZ48TYcr4+OkDCVp8lRceLXGO17CYeL3vanf1s6ta0aUpMQdecw8pZZJawy8wnYJFN83jpFQh2dfTTTOmLGJKXtTanE24gPwgGq/qNHPq9jn502bk5SZsyyLHKQUxgAR7cP4IfU/mKnttuoNnr8DBiwsbFfCgMbSRCz5BJB9yb8XFrA/gaU+rBUDRpSjSlKbHf77av8AizP/AOmN9KU5WlK+iHMmO5B6R+j/AOnQ0rPariYTAoff0dPVsH0dw30pWfpSjSlGlKBDf06Uo0pRpSutX96V/wDln/8AdHSlIyr/APgGN/4Ep/Eq6UqLXltf2DeKX4mal/JTaUpgvI7/AFTvCD8UR/6XWfSlWs6Uo0pRpSjSlGlKgzFfrKrz/cfxf/XvlnSlTm0pRpSjSlGlKNKUaUo0pRpSuB9A/tD/ANWlKjm/SScKSDddMiyDg71uuioUDpLILGVSWRVIb1TpKpmEpij2CAiGuCARY+Qa+lZkYSISJFIII9QR5BH5g+RSUpdHpmOa6wp9AqsBSqnFmcnjq5WItpDQrAzxwo7eGaRzFNFsiZy6VMocSlDqOYRHtHXVj42PiRCDFRY4F9FUAAX9fA8eakNvudv2DPfa7zJnzNpIBzlmdpJG4gKvJ2JJsAALnwABTc4wtGc529Znisp4xrlHodZtbKPwnaYa0JTslkmoqtVlH09YYpNdU1afNnZUyFbmKQTAcR27N9YmHNspMnITNhSPGRwImDXMi28lh/Kb/SrB2TW9LwtLqMrrOxyM3d5GKzbCGSExpiTggLFE5A91SLnl5ta1PdqRqm10Om6bxo7ZqioVF41cM1RSOKaoJOkToKCkoHamoBFBEpg7QHt1ww5KV/EWr7ikaKVJVtyRgwv5Fwbi4+o8eRURcGpcf+Lk5TOD9MuFyf3R5Wbdlmtwl6kJ24T72sOJxwrOSLu6OI9KPFJrIiciDVRQqpUy7FKIduoPXf2zTSR9dx5JDkFGkUOWYlSfJ5kW9fp61tTub95+ScLL+ZNviYiahcmDCmkxljgiWYRgRquOGLXZLFnAIJ9TfxUwdTtaopvJLLWM4jIDDE0jeK41ylLVSUvMPjxWRSLbZepQoOfaU/HxQ/ZXEa1O0UIZQPVAxRDWK+bhx5QwXlQZrIXEd/uKj1YD8PFT+P1bseVon7Rj4WQ3WoslMeTKCH2EnktwiZ/QO3IED8DUUsT3qC51ReEeVGH8l5lxtRcbXfJMXIYwcIsISJy3IQyh644jMhMUXz4i0NHSKArMjEOY25hEQKOoXCyY+xpjbrAmyIsaGSQGPwBKR9tnFz4BFxWze0aTN+Fsnc/Gna9dqNjuthhYjpmAtI+CkgEofFYqtpHQ8ZLgDxa5qReF7RmexY0CfznjOExvklOSsxFqNU7MjbY80RHvVyVt03myGFI72djyEUOkI/YVD9I7egJDGytl/bXyc7H45qK5ESMG5cblQD6cnsAPpc1Q+2avqGF2Ua/puylz+uMkH/yZ4TCyu4HvBo/XjExIuP1KPF/WnRhH7qUiI+RexbuEdvGxV14l8JDPI9QxjALdwZP7GKhQDfs+AddmpzcjY62HOyseXEyJUDNDIQXjP/ixHi/18fjVf2uHBr9lNg4uRHl48TlVmjBCSAfzLfzY/nW01IVH1iP2SEkwfRrkDC1kWbli5Ag9KgoO0ToLAmfYeg4pqDsO3YOsXNxIc/Dmwci/sTxPG1jY8XUq1j9DY+DWTh5U2BmRZ2Pb9xBIsi3FxyRgy3H1FxSYhqBVIWtQVTTi0pGGraqa8QlMFTkVmzpJZVwk8BRVIAB2kqqYSnAoCG+q/q+l9d1Ogw+tR46z6rAIaETgSsjglg4JH6wSbMACL+Kn9p3LsO13uX2N8hoNnnKVmMBMQZCArJYH9BAF1JINIbKOMjz0BbX9bKCtwlVYiTZqyywvGzNzBnKcpYdBZJYse4coEMQOgNjnN62wCOqf8hfH7bnTbPM0IDdoyWhlQzNzVHxzcCBWVhGzKCv2izMfNW3oHfV0+41uJvSV6zjrNE4hHB3ScEf1mUqZVViD93lVHi9Rnx7yOttfsyUJlCT6oUhnKcoq4ge6l4l0mh3STIEGJERIgg5JuYQTUEwD2dno0H0j527LpuwJqPkPIJ1ILCUtj8Z4XC2WPjGFsquPP2NcHxW+O6/B/W9xoW2vx/j22pCmILkcoJkLci/KQtdmU+ByWxHn85swt5qE/FspmNsUSZjIJCs1F0+bMXJkwUOkIqM3iiLpEetMQ2OQBHXrLVdv6zutfFtMDOxjiTryXnIsbWuV8o5Vx5BFiK8qbXqXZdLsJdZn4WQMuBrPwjaRQbA+HQMreCPQn/rW6jImKh24tYePZRjNVdZ2ZBggk3bncOTAddyJESlIZRc3aY3pHUvr9ZrtXB+21kMWPis7PxjUKpZzdnsPBLepP1qKz9jsdnMMjZzSz5KoqcpGLMFQWVLkkgKPAH0pDwiCT+32a3SkLPVx1DojVW7mWlCFgZmFROV57bZMtiN0Cir6oqnN1bdm/p1UNTFHl9n2HZthi5mBk4q/tFaaYDHngU8/fjTwq+fHNje1Wzayy4fWsDrWvysPOxspv3bJDEf3EE7Ap7Dv5ZvHn
gBa/wDpTecgrLkGMr0QOOI6XdFUXGXkrHAh4xJkxYgB0m5iN+979B6J+o3YJDELsG+qR82b/uuBpMU9EgypAz+/LlY/3iOOPyFIW/JZL3PgqVHi9XX4Y0PTM/dZS94nxo2VPZixsj7C8kngtdrcWS1h5BDGtpgjJk/kCtTji3sSR0tXJArV65BopHN12yjTxRFTtVg6kFm6ZB73t2HcB2DUh8O9/wB13XQ5k/aIRBs8Cfg7BDErKU5g8G8qygHn9D4PisD5e6Hpum7zEh6zKZ9bnQ80XmJWVg/AgOvhgxI4fX1HmnEQQaWmUrdygre8Wg2LeRSCPiHKS1fsAuRFHv3oh2qqMFCCBNvQYNXmGHG7FsMDtOn2kr6iFJR7cLhsfJ5fbyf8WjIPG3oapM0uT17Azur7fWRJtpXjb3JlK5GPx+7in4CQEFr+opLWDIDiuFmGd0XjqISWlXMHQLABzyyT4PB96EtJIEKZKOBqoYBEiolLtqubvu02jXKxe1vBp1yclsfW5FzMJPs5e9MoFouJNyHKqfxqw6bpsO8OLldWWfbtjY6z7DHsITH99vZiYm8nICwZQTf6fSmBic+2CruLoe22eBtS0K6r7KFi2aSce3m4hdX/AE2zwrlEFTPV1WpinFuI79vV6oAIa0vrfmbd9en2zdl2GHsZcSTHSCFAI1nhY/flwOt/cZkIJiJ+pbwARW4tj8PaXfw6qPrevzNfFlx5LzyuTI0Eyj+niTo1giq4I90D6cfJN6ZOQkpbLlitkTUIKxOoiw21ran/AHPdLrM0yNzRaajtFFNJAzduZfvkyibvAKAhuPaOtT5mw2Xybu9lrOsYedJq87ZpmSceLFAF9oF1AClVLc1BPIC4v6mtrYeDrvjbS67Zdly8KPZYWtfEj5XUOS3ukIxJYMwXgxA4k29PAqcuFqVIUmjLViXh2sS6TkZFNVdi+8WM6kqQqBJ5QwGP4Jw9SDsRD96Aoa9e/FHU83qXUG6/s8WPGyVnlDNHJz/cAgKMg+vttIv8g/TYV5K+U+1Yfau2rv8AW5MmTjtDGQskfD2CCWOOBYe4qH+f+a5pfVKrRtMgWlciVZBZgyO6URUlHp5B6Yztwo5V750oUhlCgooIFAQ9UuwfBq59b67gdV00Wi1jTvhQlypmcySfexY8nNifJNh9B4qndj7Bndo28u72SwJmShAwhQRxjgoUWQGw8AX/ABPmuyxuJuNgJBxU4dnLzbdIDRcO4cEjmbtYyxOtNRyAkIgUEzGNv2biH09fe9m2+BpZ5ut4sWVt0T+lAzCJHa4uC3gKLEm/1NfGjh1OduYYex5MuNqXa0syqZXQWNiF8lvIA/IVrbHYpuArjKWbVKQscusrFN3sDCLEMu1O9KUHzgiyiapVWscpv1G29Yuw76wd5vdvpdJDsoNbPn7N2iV8fHYFlL/ra5BukRvc/UfWs7SaTVbndy6+bZQYOtVZWTInU8XCf7alQQQ8o9BfwfBFLEPQA+jcAHb6G4b7f4NWj6VWPrRpSsI0lHFcCz9oR4vtxIRiL1sDsyvT1gkDbve/6xLsO3TuADv6NYjZ+B7/AO09+E5d7BPcTnyte3G/K9vJFvA81ljBzTCMr2Zv2nqX4NwAva/K3G1/Hra9QrvyuevbgTZH6tTnpF+zaViixtqYu2Myzjmix5B4yjllx8a9McifeI9O5gOI7dmvKHc5PmQ7f+7JM2t3U8yJia+LMjeOdIkYyukTN/Ue4XklvPIm3ivU/To/h/8AtJ1LwrstNBC75WfLiOkkDyuojR5APsQAtxe9hxA+tSzoi1vcVWKXvaDBtZ1k1FZBvHE7tBADqCKCZyAqsQrgqO3eAUwl6tek+nS9nn65jTdxSGPsLqTKkQsq3P2iwLANxtysSL15y7fF1qDsORD1B5pOvqwEbSG7NYfcQbKePK/G4Bt5pXas9VqjSlGlKTi1Qrjizs7krEtzWdiyVjmsuHeFcFZrFMQ6JilUBJYOg4gAmKIlAdg1BS9X0M3YIu0y4yHsEMRiSbyGEbAgqfNj4JAuCQDYWqdi7LvIdBL1iPJcaCWVZXh8FS6m4INrjyLmxAJFzSk2EPTqeIt61BVxrilGlKNKUaUo0pRpSk7HzqNgNZIxolLxysO7Wh1nbpmo0Kdyq2ESvolRT1XSSXXuVQvZ1AGoPC28W6OfgY65UEmLK0DO6FAWKXEkJPh1W9ww8XFTebqJdKuDn5DY06ZUQmCK4chQ9jHMB5Qm1ip82Na+hUxnQa01rjN4vJdy4ePHUo7SRReyTx84Muu6eAgHQdcdwL1doiUob6wemdUxemaGPR4srz8ZJHeZwqySvIxZnfj4LeQL+pAFZvce05Xcd7Ju8qNIOUaIsSFmSNI1CqicvIXwTb0BJpYGIQ6hFjkIdZMBBNYxCGWT6vqu7VEonT6tg32EN9WhkVnErAGVfRiBcX9bH1F/rY1WQ7qhjUkRt6i5sf4j0P8ArXYJjDuAmEQEd9hEdt/o7fR193NrE18AAeQBevnXFc0aUo0pRpSjSlGlKU2O/wB9tX/Fmf8A9Mb6UpytKVAzn15g2I/L8x3AXHIsVNW6xXWSdxNGoVbWatZWfcxyBXMk8cSDwiraKiI1NVMFXByKeuqQpSiJtVzsfZcLreMs+UrPLIbIi+CbeT5PgAfj+Nbl+GPhPsvzXvJtXo5IsXBxIw+RkSglIwxsihV8u7EGygjwCbgCm/8ALy80PD3mFNrfFVOr2HG+RaI0Zyk/Q7I+ZS51oJ+uLVCchJqPQaoSTNN2AJLlFFNRE5ybgIGAdY/We24PZVdIUeLJjAJRje4P1B9D+fpapv5w/wAee0fCMmLk7LIgz9HmMyR5ESslpFFzHJGxJUkeVPIhgDY3BFWXrGcgTdqcgLFMXoFXqEgAI+vuBdh36f8Ap1Zpvd4H2Lc7j1Hj181oSL2uf9a/t/l6/lWQCyoDuChtx7REBHtH4ez6Gu3/AL11VDHnPy8m+KWN8eK0SgtMpZvz1mOmcfMCUCTmj1uv2DJl4JJPGzi12BNq9WiKtXYKFeyL9RFJRc6DUSJFFQwaUrz08yc++YTVeSvOGRmazifGOeMYeTzJyTWXp+Q79LYfmKxKZqnFp3IONhXYRdlhskVuLcLNGoPm+xXyKZu+FIQ2Uq13y0bJhDiNhTFWC8yXbizhvkLmhhTb1H44pWZLjYbRkgtwq8MnX7XLN8xTbq4yV4tCiCovAaFFmouA9zv26UpkfMkx/fOLkVWMpYM5lcwbFzYzJyGpsNx1we/yqra8aZDez99jHNqx0pgRKLb1RDEdUx4s9PJSRkSrRDNArlR33nQBlK9Ajczs0aiZ+RFN+ZimZ6m2Exm5HYoFFyRAxxE5kSrdQFEe0S7b6UpK1f8A8Axv/AlP4lXSlRa8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSpbZi5EXvFtrRrdc4lcjs2sFYtvIGuGKk8MGrKK6yipDxSo37MNFm/aLYEwMp0shR2MHSoYdwBSoZcq7Pa5i3eXbnJKXz/x9n7NyxxvjWz4Qnshsa7Fy
NQsrW/KTMbkqk0W1WKl2l47NGtzpnM/fJJI93t0qbgClNNy4YZW4+8rpLmLbMjOsjceZDIfFzGUBhGvckc149tOPpuwyzWgTErBYrq9hZYmyMacsdnaybyKlWazp6zbqCUwAQA0pTu+ZrlTMhnXEjB2AbPFQqHITkxI4iyhcUcgzFCSrzOvYxt95b059eqX31tqkhYpeGbpqpRqjSXcJEFugsiZbq0pUjeBWTnmSOLUM/QiJMthodkyli2SQnMk2HJjSetWMLxYqo/kIfKtrB9ZbXVJqSjBOxkXwLOk2xgTU6zpDupTK4Es+brR5ieVXOccS1DEcw04bYtbV6MqGVj5YbTcKOb8snPKvpJSi0MYV2R19jBr3Djcvrd78GlKtJ0pRpSjSlGlKNKUaUo0pRpSuB9A/taUqPT5M6T12RQhiHK6XAxTlEpg3UMYNwHtDcB0pWLpSuRAQDtAQD6YDt+zfXPml6bqy5XoFQvmO8ZWKfLHXfKx54lBhBYv3Bp41ZZkkJsCvG7ZVky8G0UA32dRPr32LuOsObNxoMmLDke2TPy4Dz93EXP0sLDz59an9f1fe7XSZ/YsCAyafWCI5MnJR7XvNxj+0kM3JvH2hrfW1NTB5/t05ycncFtcEXZbGkFWE5k/JNlKxL7GzqwgxZu3tFBBuU7ttYWLhyZsoQygmIqkPUQodusKLZzybdtauNIcRUv8AuAQYy1gSn4hh6VZszo2qwvjmDuku6wx2KfJMY1LI65Yi5MFybn7TEwAcEDyCLG9LLDsXmQTXKQz2jixxYS32xtsbS2PY50RdpiNZVA9bZWN9KNyvgse3WL0qB/DGN07bjrJwEz7yPsxD7nutwKA+Iv5QxIvy/G3ionteT1IDEg6Qdkuv/YxHLTKYENnAH3WiVDxMXp7ZYcxTG1HlNmSyZHrVKlOGGaKrXZzK1/x9JZElHsWpXa3WKeyQdQWUJJNNuVY1XvKypkWJSmA5TkHcxvRqOx9znzZSY74GQkTTuhckcVVRdZD/APhf0FXPafGvUtf1/I3GN27UZOfDrMbKXFRX92WadismGlzb3sYDlIT4sRYCpUuMcY9f3yLyfIUqsO8jQ8G6q0XfHEMzVtkbWJBRRSQgGM2dMXzeIeHXOZRApwTOJx3Dt1MnFxWyVzHjQ5arxD2HIKfVQfWx/CtZx9g3sOkk65BmZKaCaZZnxhIwgeZQAkrR34l1sLMRcWqGXDvCWdMaXjP1ty5YBgKbJXmzQ2G8N1ZKqx+MoDG5Jn27GXdlD1pi1Ozt02ddVN4ZwJnCpdzKCJhDav6HX7LEyMqfPbjjmRhFEvERrHfkHAUeGa5Bv5/G9bd+We5dL7FptHq+rQe9t48KKTPz5jM2ZLl8PbfHaSZiGgjspTj9o8BbC95N0jMtdzDTITIGE3DS+VWRtEhXnskt46BBoSCkF4uwrotpRo1dLrx71AxClEgFU9JREO3XZmbfYT4uPldagjzInygkpZzF7cQJEki3H3MtvC/WqHndNk6xt8nRd6aTW7OLDE0aKqze48ih4UYoxVVdTcte6+h8049elns1Glfv4N/XnBnLtAY2SMkZ0VNuudJJyIoiYndOyFA5Ph2HWZpNll7XAGZm4k2DMZHX2pePMBWIVvtJFnH3D8jVe3WtxtXnnDw8uHNhEaN7sV+F2UEr93m6E8W/Ot5qXqJo0pRpSj9n7P8AHpSmBy/geOyc9jJthJI16wsOlFZ6ZkV01fsymMqUrtul3ZlnKaogBTmMIdG5RAdaX+T/AIdwfkHKx9tiTrhbuGytIU5rIgN7OosS4PoxJ+24INbj+NPl7O6Fi5GryoGzdLNdlj5lGjkPi6MbhUI9VAB5WYG9V3Xmg2ChyKLCztFGkk6Bdcpe7SMzM2BUwNVWz5BZVsodchROZENjIhsAh2hrw53Dpm86fnLh9gjaLPk5MPA4FAfsKSKShLDyYxYx+AfWvbXUe46Xt+E2ZoZFkwY+KnyQ4cgcw8bKHAUkAObq/kg+DTvYx5A2HHzeMZSbte1Vg5zFkWDghva8OcqYpoNYl84cKd6zAnSqJRKAGMAkAS+nWzvj75s3XSYcbE2Er7HrzMRLGw/rwGxVUhkZjdALPYgAm6i3rWtO+/DGl7nPkZeDEmu3wA9uRT/RnF7s80aqLPe6ggkgWYg+lTUrd5gstxtnYngnfuOaMaoKzcg5TRjpcj9smrJRhhbqgdi7igUAjgO9HpH4Q16w0PbtP8mYOwxHw5P+IHHVTkSMFjmEiAyxEqwMbw3CyDkbH615X3nU9x8b5uBlrlxf8sE7sII1JkhMbERSjktpEltyjPDyPpTlwsfGxERGRkKmmjER7Ju0jE0FRXRTZIJgRsVJcx1DKkKmAbGExhEO3fV/1WFgazW4+v1ChNZBCqRBWLARqLKA1ySLWsbm/wCJqh7XMztlsp8/aktsppWeUsoUl2N2JWwCm/qLC34VnCiiYqxBRSErjr8QXuyAC/eE6D99sUO96ydg9W+4dmsoxRFWUqvF78vA+64seX4+PHn6ViiWVSpDNdLcfJ+2xuOP4WPkW+tY0dGR0OzRjoli0jY9uBgbsWKBGzVADnMocEkUgKQnWoYRHYO0R10YOBg6vETB1sMePhRg8Y41Cotzc2UWAuSTXfnZ+ds8ps7YzST5r25SSMWdrCwux8mwFv4VBnmKwkjy9RekGRXjCxL4rhMElVIuPXI8STKudQhDJIOHZVQKInEOoCgAa8g/5R4We+z1mXH774AxpOQsTDGwkA5EgWVnDWPI+bACvWv+MmZgprdliP7CZ5yY+JuBLIpQniATdlQi44jwSSahaAmKBDdpR+qKbYQHbfbcvbsJQEB9HwhrygLoQfT6g/8A3fjXqjw/2+v0I/8AXzU0sJYQmHEZVsk12+BFOlpAzsWvs1woiLFsdxHv2DhA63cOHCg9QFUEBIUo7h27Dr1f8SfEe0n1+u77otyMfIefnx9pivtqWjkjZS3FmPmzEcQD482NeWPlf5X1kOw2HRd3p/3GOsATn7qhvcYLJHIrBeSqPBKg3JHnxcVPP6OvZPr6V4+80en0aetK6F3TZqZIrpy3bGXVBBArhdJAy65vqUUAVOUVljfAUu5h+hrpmyMfHKrkSJGztxXkwUs30VbkXb8hc13Q4+RkBmx43kVF5NxUtxX/AMmsDZfzNhXTJ+0fZr/2OZsWX8G59mC9A3ggf90bwgu+7AVBb99t17dvTrq2H779hMNZ7Y2XtN7XuX9v3LHhzt548v1W82rtwP2P76H+5iQ633V9327c/buOfC/jlxvx/OkvCO74RzBMLFFQqyZ4A69hnop4cjdGxEX6SMY+OVIVU7JVAervB9A/9Ne1GT3FcjDw95jYjRnCLZORDIQq5INhHHEw5GNh55G1jVg22P09oMzM0mRlK4zAuNjyoCxxitzJJIDYSK3jiL3rdWELEMaPusaKJLeKabDMlXMy8H35PHBs3DvO/Fvv3fwdXp1K7wbxsAjrpxxsvcT/AH+Rj4X/AKn6fPLjfj9L2qK0p0Yzv/8AoBkHXe2/+wVD8+J
9v9Xjjyty/L0pEzuKKy+lH1thmbaHyAsZV0xtZvFuzMpRVsVqL4Y47nwi+zcBL0iTbt3231U9x8b6HM2E3ZdVFHi90Yl48w83McpUJ7ntl+Dfb4sVt9bXq1an5G3uHgQ9c2ksmV01AEkxPsT3Ig3P2/cCc1+7zcNf6XtXUzZUWkL0CqzbgZKzOnUmFTfy7dxJyKkqcviZdZm/UIr7NIr1j0lExAAg9ICO2vjFxOodRm0vXNq/7jfSSS/s5JlaWUzEcp2SQhvaBv4BIsLKCbV2ZWX27tkW57Fqk9jQxxxfvI4WWKMQg8YVeMFfdIt5IBJN2I8064gIekBDf6OtjkH61rutLIWGHipKFiH7sEJCwruG0QgKSxxdrNku+XJ1pkMml3aXbucSgOonN3es12dia3Ml4Zuc7JCtmPNlHJhcAhbD/wAiL/SpTD0uz2GDl7LDiL4WEitM1wOCseKmxN2ufH2g001izK7ho+8PEKHZFgq78kTEOVmqwM598KTlRw4HuyFVZRbErYRUWHqASmLt6da13nynk6rB2+XDp89110whhZkbhkvZi7eBdIowt2kNxYqR61sfS/F+NtM3U4su3wUOwhM0yh1540d1Cr5JDyycgFTwQQ1/SogOuWOVnKDhJE1aZd+UxUnDaIEzhsBh3KdE67lVIyhA9AmIYB+ENeYcn/JL5IyYnji/YRcwbMkH3Lf6qWci4+hKkflXpjH/AMcfjyCVJJP30vAglXm+1rfRgqA2P4Aj+NIh3nzLbwZEDXSRRJJkImuRsmzQ7kqZAIAsTkQ7yPMbbcRRMQTCO46qOT8zfJ2T7/LazouQAGCBF4gC39Mhbxn6koQSfWrZjfDnxvi+xx1ULtjklS5dr3N/6l2tIPoA4IApVU7krkOpsZBm9chaPFd2tHOJ4yjhw1XKoiRQTOO8BRVmdsmcAT9IKmA2/YIasfVvnzvHW8OfFy5P7iZADE+QS7IwKg3a9yhUMOP0c8r2FqrnZfgjpXZMuHJxY/7eY7iVccBVdbMRZbWDhiDy9OA42ubh5EeZseVul4qgvzOiok8SLaabA3MuBA70UAVamUKiJ9+kDCIgHp1tSH/KrCEKnJ0sxyAo58Z1C8rfdxulwt/S/m1awl/xczTM37fcwiDkeIaBuXG/jlZ7cretha/pUrmUpZ3Vm8MeAbJU5WvM5FtP+PKd8eZc9B1IpRgAgJUUUT797tsIhr0fibHsGTv/ANu2HGvWGwUlTJ9y8hnexMJj/wDFVN+f1rztla/QY+i/cLmSN2Zc142x/btGIVuBKJPxZhbj9BSmcuWzNIzh24btG5OnrXdLJN0CdQgBetZYxEy9Rh2DcQ3HU/PkQY0RnyZEigX1ZmCqPoLliAL/AJ1AQwT5MogxkeSY+iopZjb1sFBJ/E/hSeTkLMa3KR5oRp7nexEniFjK/KZ4tMHV2PG+zwERBsCHrArtsP0dQsebv37M2EcSL/i37QOuUJBzM5NjF7f/AIhfu5+n51Nvh6BeuDMGXL/yf92UbGMZ4LCFuJPc+rcvHD6fhSnH6WrBVfo0pWmsM43rkO8mnbaQeN2JUzHbRbQ75+oCipEgBu1TEDKiUT7j9AA31F7rbwaLWS7XJjnlghAJSJDJIbkD7UHk+T5t9KlNLqZt5s4tVjSQRTzEgNK4jjFgT9znwPTx+J8VtklAVSSVKBylVTTVKVQokOUFCAcCnIPaU4APaA9oDqRjcSRrIAQGUGxFiLi9iPofPkfSo6RDHI0ZsSrEXBuDY2uD9R+B+tI7IF5h8d1h5ZZszgG6R02bZNq3F0svIOiqA0SBLrTKJRMmJjCJih0lHt32Aav3Xt+r6P1+XfbUv7CsEUIvJmke/BbXHi4ubkeAfN7VZ+mdS2fdt/FotUE95gXYu3BVjS3M3sfNjYWB8keLXruqkpLHp0dNXJ1Cpv1I80nIvIox0odJkoBnKCpTLj1JgmzEveCI7AcB7dtdnW9hs36vBtu0yYi5hhMsrw3EKofuU3b04xkcyTYEGursWBrV7PPqurx5TYYnEUaTWMxcfaykL63e/C3ki3ilM0eNJBq3fMXCLpm7RTcNXTdQFUHCCgdSayShREpyHL2gIasGNk4+bjpl4jrLiyqGR1N1ZT6FT9QfxqAycbIw8h8TLRo8qNirqwsysPBBB8gj6iu1UgqJKpgcyYqJKJgoT6tMVCGIChN+zrTEdw+mGuyRS8bICQWUi49RcWuPzHqK+I3EciyEBgrA2PobG9j+R9DSeqkE4rEC3iX1glLKs1UdqqTU2ch365F1lHAEXMUwl6GxDdBe3sIXUJ1vTz9f0qa3MzcnPliLsZ8ggyMGYtZiPogPEf8A4QKmuxbeDsG4fZYmFj4MUgRRBACI1KqFuAfN2Iu3j1NbaNlI6YZpyES+ayLBYVCovGapV26pkjimqBFCCJTCmoUQH6A6ksDYYO0xVzdbNHPhvcK6EMpKmxsR4Nj4P51G52vzdZkthbGKSDMQDkjgqwuLi4PnyPI/Ks7WZWJRpSt/j1coPbO26FBOeRaqgcCCKQAWNbgIGP6AOPwB8OukzKJxj2bkVJvbxYfn+NdwhYwme68QwFr+fP5fh+dOjrurpqorzbfLbk/MCx3QXNHuENT8pYjezrmtHtAuk6tPxFkSZFl4WVds0nTmLWFxGN1UXJUVQKJDFMXY2+qT3XrH/IcOOSKRY8mAkgt+khrXBI9PQea9N/4z/PEHwtvcyLbYk2V1/aJGsoh4+9G8RbhIisQHFmYMnJb3uD4pp/KG8qO38DZa+5Yy/cqzY8oXuuNqcwhKSu+fVut1kkijLPDqS8g0jVpWSk3jVEQ2bpkRTTEAEwmEdYnSunzdfaTOzJEfKlXiAnlVW9ybkC5Pj6elWT/Jz/I/V/MePhda6xi5EHXsOcztJkBVlll4lFHBWYIiKW9WJJPoLVebrYNeQKNKVDLnBxMdctMb0SNqWQD4mzJhLL1Lz7gjJxoROzR9QyhRRfIMDz9bVcswnazOwss8jpBuVZFUzZ0YU1CHADApUBrL5WXJvNWQeVWXs+cqMd2a98leC0pw7aQ1JxPM1mhYwVc2x3YWFjrsfIWuVmZKLWK7Od6i5ci5WdDuRQiZQLpSrf6LhCj1qq4wjrBVaZabfjej0yotbq9qkOtM97UoNlEpPY2QetnUnHJmWbGVSIVbdLr2Ad+0VKqZr/AnzEapzAzRy7JyP4n5IuuQZVeuY1f5Wwjk2VlsEYHQfnVjMSYyRh8mx8FXyvm5u+m5NFuV5NPdjuDmSKRIqlXgpA6KxTK9Oio8KzIDtRuQ6TdRyCIAudBM51DkRMqAiUDGEQKIBuPaOlKSlX/8Axv/AAJT+JV0pUWvLa/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/AEus+lKtZ0pUcc48QeMHJeTrczn/AATjTL0tTk1E6rIXyssLA6rxVVvEKGiFXhDmYKmWETdafScBHsHSlZa3E/jS5ydE5odYNxm7ytAoxaMNfndUi3VljCwjMI+HUZSLhFRVu5imRQSbrF2VRIGxTBpSttZ+NmAbpVLnRb
Zh3Hdhp+RLi4yFeK7K1aKdxdovbvwfiLjLtlG4ldWZX2ehu9H/AEjZIvr9mlKcem0ypY7q0HSKJW4Sn0+tMEouv1muRrWIhIePQ3FNpHRzJNJs1RKYwmEClDcxhMO4iI6UqH8V+sqvP9x/F/8AXvlnSlTm0pRpSjSlGlKNKUaUo0pRpSjSlQ3s+XYMuf1cJngrkaxvqc9v6NjJW3pqGlEspEkSaLc2sA8AjYjrD1kZj9kMl62sI50YzxrisnvGMvy4nhYG1uXpy/KrKnV81+pP3FZsT+3pmrimL3V/cl2TmHEP6jEALF/S/iktm2yU2PpjqkWfLrTC81l1vLY3oFuLMM4iyIXCbinKTJelC9VRI8tEWB/Et0wHfrTAdfOxmx1xzjTTjHknBjRrgNyYeOF/Vh6iu/p2v20+3Xca7VNt8PVMmXkwcGeIwRuCwyOIJWF/0OfwJFVlYhib/H8z8CxOOOcWZ84YxcYPXlb60sVaNcMQ5BY0RFelu3UfkWNkXMBA5KlLU28dIpmTFwY6QkKfYwgNRwEyk7BjR4uxyMjD/b3cMvKJwn2EiQGyyFhdvr4r0V2rK0WR8RbvK3/TNPpuxjchMZopvYzsVskjIVXxXUSS4iQn24jfiAwJFwKuRO2bHcNnajZqq7ZCczN0q2QVcszKABVRaLqJiq2FUobH6BL1B2DuGr5ZSQSByHoa8liSRUaJWYRP+oAkBrenIA2Nvpe9vpVe+SOFEpXZ4mTOM2VMnYlWgZ6fyrLYEpk8nG47z3kt2+POuW98fyrk68chb3SKTF0cgigi2HchCiA71fL688Mv7zUTzQFWaQwo1kmk9fvJPgN6H6Wre/X/AJhxthhHrvyLrNdtVngiwo9lkRF8rW4ir7YOMqCzGAEyID9zP6k1FCG8zCXxtkbkRa+TZ7xWEMMUfFcRe+K9Nxw3sR8fXuySRmM3eKflAsmiheaMq4MkiousdBNEywdHWBRNqEj7c+LlZc+3MqDHjjD46RhuDsbF1kv96X8XNrX+tbPy/wDHfE7B1/Qaz46GFkvt83NfG3ORltEMrGiTlHjz4fAnGybXYKoYsFPK1wKc2NYYtk5GreYrZssctMhQ09PVq/Yj4/IKOuvEkXkVI9HJGyGJ4Ny8eyFYeKm8Yod10JIbAoUx9+3NVcKRk7VNPnSozK8cP/8ADD/ZYxrc8T6+fA9armRP2XHgyfgPXavq+BlwQy42dtDb/wCa+Kf3HNc2QKqzKPsAS7N5UgWq2/fcAHtDcAEAHsEAEAEAEPgEN9XevLXp4rnfSlQ75iRfKVljWoSHDD2G3vVcyjVJi0UhdvW4thfseKvjEtteJJzJUY+BOomr4hRyns5OBRBMesQ3gd+m5GGjdf4jJWZSy/aOSX+4XPhfxuPP+tbZ+Jsj41m7FlQ/LvvtpMjWzJDkAzO2NlBbwS8I7tILjiFP2C/3fbTsJRWXq3Fy7SMkWVsTXik1YVacdd3YWU/KOinfg6dm6ma8RAoqG8OXbqV7sAH06o66z5K69hZeHrMmLawGH/4z5D8cpciWT7y7foMUCEmNfVuIB/Cup9j8b77OxcrYwS6yVZyMhcdOWM+PEhCcE8Os07Ae417LyJHkXpz60zm4+BjGdklkp2dQbASSl0WpWST5wJzmFUjUnqIgBDAXYPT076v+gxdvhabHxN9krmbdEtLMqBBI1ybhB+nxYW/K/wBaoW+ytTm7jIytHjNh6l3vFCzlzGtgLFj5b8bn8bVvNS9RFGlKNKUaUpOWiqQVwi14qejmr5FRu7QbqOECLLMFHbczc7pkY4boOClNv1FEBHbb0aguw9c0/aNe+u3EEc0TI6qWUM0ZdeJdCfKsB9Rb09anNB2Lb9Z2CbHTzSQyq6MwViokCMGCOB4Zb/Qg+v41WhcMPSGL7U3Ssism5pqhgWCwxUSu/K9jk1SGdRzxNJZuVi5O19VUwnAhercN9eA+0fF2b8e9jRN808nVGN/3MMLSB4gRzicBl9tinhyWsOQIv9PeHWPk3D+QOvO+jWCPtKjj+2lmEZSUghJUJDGRQ3lQFubEEfi2itjdMHEkygpWUa10ziXJHxfiVDNjR8oIkVQdICYETg5QKQqoiUTGAPTuG+qFJvcnDnnxdRk5MejLzCKHmShjl8EOt+J5KFD3Fzbwb+avcekxsyCDK22PjybsJCZJuIDe5F5BVrchxbkUsbC/kWqR8Py3loOKjoZhQIFJjFR7aPZJElZIAIk0SIkmUROQ5thKTf0jsOt76v8AyX2Wo1sOrw9LhJiY8CRxgTS2CooUDyCbeP8A9taN2f8AjfrttsZtpmbnMbLyJmkcmGLyXYknwQL3P4f6VKqj53x5e3cVDxck4SsUmgc/sdxHvUzpLN2513aQuu6MzEqZUjCUe87S7fD2a9F9R+Y+kdxysbV67IkXd5CE+y0cgIZV5OOduBsASPu8i318V567Z8Q906hjZOz2GOjaTHcD31kQgqzcUPC4cXJAP2+Df6U8mtqVq+mYz1T7ReMfrQVT6FZH2xGv1mSrojVN+yad8KjUTKiVuoIrGTOBVRAg9Hp321qj5m6v2Dt/Sm0/WuLZ37mKRkLBRIiciUu32n7ircW+08f4VtL4d7NoOpdzXbdj5LgnGljVwpcxu9rOOP3DwGW6+Ry/C9RPjOJeRpJZmpNTEBEtli9662WWfuWJjmE5mxWrdJNuc+3ZuRTuyiPwhrzdgf41d62EsL7bKwsbHdeT/c0jxk+SoRQFJ/8AyW4g+lxXozP/AMj+k4EcqarFzMmdDxT7VjWQDwGLMSwH/wCUvI+fSpROJWs8b8ZxMdIvJGcQZu3jSMIVJshJSbt84dSJk+7A/cooNgPsc4dXQXYRAdeh5tj1/wCB/j/Gws6XIzIIpXSIWVZZXkZ5bWvxVVvYkX4i3i5rz9Dr998598yc3CigxJ5YkeU3ZookjVIr3tyLNa4XxyNwCKWtKyVVbvDtpNhJx7d37GbzctDKSDZZ9AtFgERGUEnQREqe3aYQKGrb1PvvXO3auPPwsiFMn9qs80BkVpMdG/8A4trAW+pNgKqnauidh6ntJMDMx5nxv3TQQziNlTIdf/4V7k3+gFzTT2rIXtKGvk86tsCyxyLB7SIJxELKTIydilWYqNZR09i01nsSo3A/QKQEHYuxvh1rXsXdxn6rc7nI2WHD0UwPgY7QEz+7kzR3SV5Ig0kJS/Epb0s3r67H6/0z9js9Pp8bW5c3dxMmdkLMBB7WNFJZ4USUrHMGtyD38m62ra4rSx/fqLRo1SRZ2uUxueOdAqVzIpuGE82KbupHu3ZWj1wicewh1SCQ222pL45j6X3Pp+owXni2Ww0Jia4aUNHkKPEtn4SMp+jOvEn86jvkJ+5dN7bt81IJddr94JEsVjKyY7HzFdOaKw+qqwYXqQg9o7/R1uzyfJrS/wBLVxtpSudhH0AI/tBvrkC9cXtXUssi3L1uV0G6XUUgquFk0EgOcQKUgqKmIQDGEdgDfcddUksUK853SNLgXZgoufAFybeT4FdscUszcIFZ3sTZQWNgLk2AJ8D1/
YRfcfVgSAvobj8Kierdi7r81ZvX+zd+2v94xtTu0xV1uKTDsirqCc8GBb8IWVWedgXUK4B8mpcY+5M1SoYbxDijGFFYu+RTzjTWMrY/4pGBPH72Tq7Ru0ZSXhXIN3lZrEcR2VdVNHvzCCewFAd99TmLt4YMCDBw4wdqcRZExv0ErYA29VUXubXrVe9+O9pte27Xs/Y8106CnYpsLJ3XnKVJmLMtxdZpn48QW4jz5P4VAVjxXziXlNRVsk0+qZHyxbb1NZ8vvJC/4ibZDqVU48s4wIhlxHbWGQaLtm9/hm6aiTJwVEUTICHdjsOq0ul2P96jOWiS5skhmed4+arABYYwYj9Y+h9PwreE/yX0w/GmanXsvK1/WMXCj1mNqsXOOLPNtGf3G3ZiVgTiyEhpFLcgwPL0rb0nj/AIOzvyjezFW45z09xQz1Fvs+2e62+u2ulP8AG+ecKzEpVa1AUONaDDtqbHu2RTm8ICJ1HqSw9BgTACh94+s12x3JkhxGfS5IMzOyupjmiJVQgFuIP4W8j0rG3Pee59K+NkxNn2CCD5P0cq6yHHglgyFy9bsI0mmlyXb3DOytYc+QEZX7hyN6sR4+3uFzPlq7ZmpvIG4TlHsFHi63F8YbbBIVhzjR/Vp0YWQySEG/cltKAW53GKppquGybdcigmTObsALTrMpM/Ok2EGVI2O0YUY7AL7ZVrGSx+77iLXtY/S9aE71pMvqHV8PqW20WJDuIM15X3EEhmGWs0fuJie4o9k+wHUkIxdSLMopVcxs9Y/4rYOs/Iq71aOtMjjxNFrRI5VoyGZf3S0rpw0HBwUw4Zu3EIpNO1SlXVR6DGQIYPWHpKPfvtni6XXPtchA7RfoFhcu3hVBt9tz62+lRvxN0je/Jnc8boOnyZMaDPJbJcM3trjwj3JJJIwyiQRqCVBuAxHoLmoJYs57cuckUvFmPQ4yBC8rskuZyfmI+dYS8FjHE2OY61qsImbyXHu3il0gl7LXkTKRqyqZG7tcSCTcD9IVvC7LvMuCDF/Z8d3MSSCCscSBrAyAnmvJfKk2BNvxrdXZfhH4s6/uNnvv+Rmb4x1yxxRvG0cmZm5bQhnjxGVRBKIZSBMoJZF5X8i9TvTq/KhxhFaqhnPGS3IVtakXbjISFH76qsqmeylfewF6aV6dwhImqoGaEXOJepX7Jt/lasgh3Ta72f3MP9053L8LqF5X48b3vx8XP8a0o2y+NI+5DZ/2XYjojYpUYpyLTNP7PH3RkcbFPe+8qPRft/KvnHsbzCf5fezWT7FiCCw1FvLfERFHp8XJylntsaudmNItktY5BUqcDJtA74rxkkUyZuzb07gxU3zZpkzHgXAUsAiglmB/QxY/pI83Apvcj4og6qmH1zH2s/bZEgkfIndEhgccv3ECRKLyo32mOQkEef4FP0DPmQ8z5zmo/FcBCjx8xa+vuNsyzF2ibBWMkJZmrg9cK2x+wdgWMsVEdonJ3r8oCBjCbpENg36sXZZWw2LJhqv9rhLxylwyv7q+nAHwyH/yrO3vR9D1HpkOR2aeb/nWzjxsvAjx3imxDgS+JDksv3xZKm9oj5H1v5tGPjL5gWaMj2Z1hPLvF3IVaz23r+WLwsLatydPxpFViruJEMbNJqYtCjhds9v5WZECOinFsRZUphAAHYIjUdn2GXMdfnYcqbMLI/hSsYVSfbBLebva172rY3yL8F9Q6/rl7j1bsmBkdHafCxxymSfMeaYJ+7aOOEAMuNyLFCOZVSPJp4sc8puTUvlHElJynw0m6TUco1Fo7e5DqWQ4G/RlEvYi+WloCyIRoEL7sxbJomIyiZhKdw4ImUoiA6zsPc7iTMgx83AaOCaO5dXVwj+bq1v5QB+r8Taqp2D41+OsXre13HWe2w5m11uWyriz4smM+TjfaEliLefedmP9EgEKha4uKnyg7aOhWBo8aOzNlRQclaum7kzVcA3M3clQUUFuuUPSQ/SYA+DVmDKfQg2/A3rR7xSxWMqOoYXHJSLj8RceR+YuKySlE5gIUOoxhApQDfcTGEADbb0iI65rrJAFz6VXblbzJuPdep2flMOZCxVlDL/HWwNK3f8AE1uyVEYbWiJALbE1KdWkrDfEY5ghFwjyUBMXqXfM1XvQ2Kr3qhS6r+V2PAjhnOHJFLl45s0bOI7fcFNy1hYE+o8X8etbh0Hwv27M2OpXseJn4PXtxCZIMqHHbMDL7LyxhY4CzFnVb8DZwl5CvEXrUPvN18vdivi9uXklQJj4z7m5x+nJV2bjpKBo9jZNk1HjnJEsu8Ykp1TM5MZFrLuSAyeimY6JjJgJg+G7b19TEP3MZMr8bgiykevM3+0fQN6H1FZEX+Pfy9Kuc/8AZcuP9jjCfjIjK88ZJsMdQG96W1i0SnmlwGAYipSYme5Dtd8yFkwuZMcZO43XmJqTnAsRRYpqu6r4sWqra3yMleo988j7ezm5NMTNxS2BuBRL2CA7yeIciWeTJE0cutcL7QUeRb9RLDwwJ9Pwqi7+PT4Grw9L/bczB7niySjOedyBJyIMKrAyhoWjX9V/1ev1qjPz8ufmBonDuSuAjavWDLOX7vVIy52llS5Vi2i8Ux1KnIy6kVu70U3TlJ+4iYVZyLVuRRVuiUDrgQpy70nve+wUxJNEFaXMdQzcSLIFPL7vXzYXsPQeteov8T/ibtWT2LC+V5JYcDruNO8MRmVi2UZ0aA+yPAKh3CciQGa4S5Bt5Ep3M2NOQ3LRDLGYMRnhcM2eRiqvKY2w22aQM7EVmOq6FTrTGnu46Nj4h7fkysm6qTp21M3fyACo7SOU6ga1K+bjbDbDKy4eOGxAKR+CAF4gKQLcvQ3IsT6iv0Mxetbvp/x+2g67sRJ2SBGlXJzCXR5GlMsjTBmZxAeTAqjBo4yFjYEA1FvJldGo5Cula91rZSE4WySzBtUr4ABc6+xSdqeAjLR0sItM023ZCmDgxG6KZlNxIUCiGozJj9nIeMq6BWI4t+oC/gN4Hn8fFX3R5w2Gnxs05GPktLCjGWD/AGZGKjk0X3N9hN+N2Y29STSH101LVct5TnP3LeDcz4k4+2nL7OO4iZHu69HzFi+/kYExKSk5OAIa6XKXdrlRRiJmOSOQ5JByfu0SlDvDd0Al1b+q77Lwc2LAllA1Mj8ZEa3Di/hmJ+hH4n0/hXmv5/8AiXr/AGfrmw7dga9n+QcLGE2Hk4/L917+N98MKgXLoxuPbUXJPgcqus5B/wDLs8SeQ9Cm8seW1nmObuG7J01r9RXu0dlPEdmskaZiVWILklq/eTdScqoCqdY65pMhF1SB3aSXaF0z/j7VbCA5XXJxe3heQeMkW8c73X878v8AQV5l6h/mF8g9P20eh+ZtU5QsDJKIGxcuONuVmOMVVJQDYAKIiVB8s1eRfOuC8pca8r3LCeZ6m9peR6HKDGWCDemTVApjppuWMgwdoHUayMRKsVk3LRykYya6CpTlHt1qXNwsrXZb4WYhTIQ2IP8A6H8wR5B+or9Cuqdq0XddDjdm63kLk6fKTmjjx9SCrA+VZWBVlIBVgQfSrUPIUvttj+
f+PMPMp+JQoGboy21q91Owrv8A2fZSxdRnJ2DGCQaEVRRuLCWjklGy6ndlK2KuUT+sBR7tV17C7PtIdTsFD4ziUcG/SxaJgB6Gx5cWDCxHHwfUHTv+TTRar4tzO1QJINxrngkhmjA5RXnjVy1yLxMjMrp5uSpt4uPVo5RdRL58yMYpHLF84aLGSMU5QcMHJ0TimcS9pQVSHYf8oNeO5459bmS4TG08MzI1rEco2Kmx+ouPB+ta0gkx9liRZgBME0Kut/F1kUMLj8wR4+lYh0jdJVTFKJDHMQDB0j6xQKY3Z6Q7DB2+gdYpjPD3GA4kkX/MeT/3rIVxy9tSeVr/AP0/+n/2vWWR89FieOGSeFYJn78keZwuLEym+wnK2AwtyLiBuwekN+0N9ZQy8tsM4PvyDDU8hEWb2y348L8Q352rHOLirljOEEZzCOJk4r7gH4c7civ5XrvhISVscozhIJivJSkgsCLRk2KAqLKmATCG4iBCFACiImMIFKHpHXZqdRst7sYtTqIXn2U7cURfVjb/AKAAC5JIA+ptXxtdtrtHr5drt5kg18K8ndvRRf8ALyT58AC5+gNO5VMC2q21uzz7R5HMnVTfyMdIwL1J2EiZ1GJAs4IkdBNVEwCUBAogA9Qh2b7hrZ3W/hvsvZdDsNzjzQQ5OtmlilxpFf3S8Q5MAVBHpfj68iPF71rbsXzF17re81+oyIp5cfYwxyxZCMntcJTxUkMQw82v6WB8+hpKr0lzEHTYOW7WVey9XQm1Ojx7ZWoM3TkoElXwH8Ml3iSIBumruQxFAHsHVdl6lPrGXCnSPKy8rXLkH/cU4SO4tNIDxW4UA8X8EN+NT0XaoNmrZkDyY+LjbBoBf22GY6L5ijI5mxa/3J5BW3oad7GN1tkTfYZQ0K6f1UIn3JGswTgBj5JszQTbi8bxLt24QdSBzKA7cEIG5+sw/U762f8AH/bOy63ueLIcSSfrgxv2H7XHb+nKiKF5rC7srykn3pFX9XIn9N61p33qnXNh07KjXLjh7F+5/f8A7rIX+pEzsW4NMiKyRgD2Y2bwvED9VTFjIOwzM00mFosmO21Ul5Ng3iYxGLcpXmuGblBn4xdBNFaMad8YwggXcANv2a9Ra/UbzbbWLaS440cGtypY1hiELDYYpX+nzZQphTlchBexv4rzJn7fSavVS62LIO7n2ONFI00plU4GUG+/grFhK9rXc2uPrUanqjShe8DW10Sw42rt9nYUh3FWukYY0O8jnaqiarNkiiDplHLNzmVdCPeGPuBQ9Ia0JlPi9NOdj9j0+bodHucyAFsTOivA8bkgpGq844mUl5vUtcAeore+MmT3AYWR17b4W83enxJyFy8GX+skiAEPIx4PKrAJEPtA8k+lSpnFFbLIylKZR8zCPWtcQkILJQRrd00j3TsyKYexnK4EOEsmkG5wAxBEu++vRe3kl32fkdUxIcrEyosBZMfaiJXSN3IH9Bm8icL+oAjxe9eedVHHosHH7VlzYuXiyZzR5Gr91keREDH+sq3Hsk/puD5tal5FtXDGMjmTt8tKO2jFq2dSTgoEcSDhBAiSz1chRMUqzpQonMACIAJtXHX40+HgQYmTM2RkxQojysLNIyqA0jC5sXI5Hz6mqfsMiHLz5srGiXHx5JXZYlN1jVmJCKfUhAeIP4Cm3v2N17XMxNmjbg8pkxCRrqMbyjBmzWXI3eOiLuP9JcqJGRKqn1JiACACU4/tDQ+59Em7HtcbsGBtJdVtMOB4kljjRmCu4ZvvcqVDC6kA2IY/ne9dO7zF13V5Ohz9ZFtNZlzpK0UjuqlkUqv2qGDEGzAkXBH8CFpYZaLhKtLS0udd7Ex0SueTMwSF45ctSIgg6FBJufqVXWAw+qU2+4+ns1bN5stfqevZOy2ReXWwYzGUxjm7oF4txCm5Y39AfBPiqppddn7XsGNrtcEi2M+SoiEh4Ijk8l5FhYKPxItYeaRTF4rDxuMGVDThGNRlVA8S2sj1drOBDOG4PUU4Vu4W791KlFQ4qJG6xKHwaqeJlS6vB69i9NXEh61ksOSZTsmR7DL7iiBWbk01ySyEkqKteZjR7PO3+X3A5cvZcdTxfFRXg99W4Ezsq8UiNgFYWDGsmRu+M7XEowr2faO4q5vpWpN0yGeN/aL1kUpZRgRwRNMzdRuUwCJxEpdh3Add+f23oHY9Ymqy82KXXbaWbDUDmvuSRi0sQYKCpUerXA/A10YPVO+dd2TbXFw5Itjq4ocxiQje0khvFIVJIYMfRQCfxArYY/tFTnWD6Hp4ujxdMcI1sFVklPCq+FR2SNHPTqKhJtQTJt3xTCAjrO6X2Dre3w5tX1cyHX6p1xbsDwPBbD25CT7q2H6wfNYXctB2PUZkWz7N7Yz9ohybAjmObefdjAHtPc/oI8Cl9q51Tq0bqRlUp6LjmkQm9iXLd4rLS5X6KakOskQDMkTsB3XXB8PYBg2AuofIz9jHucbAxsYS62RJDNN7igwMo/pqYz9ze56XHp61L4+Dr30+RnZOSYtjG6CGH22ImVjZ29z9K+3+HkmuYh/MPHM4lKQnsluwkgbQ7rxqTv2yw7oDe0O7TKUzP7JuXuzbj8OvrWZe1ysjLj2OJ+2ghn4wvzV/fjtf3bD9HnxxPmuNlh6vGgxJNfl/uZ5oOUycCnsSXt7dyTz8eeQ8VvNx9H/RqXuaibVgyTAspHPo5RZ02TfNVmh3LJUyDtArhMyYqNnBQEUXBAHcpvgHWJn4Y2GDNhO0kcc0bIXjPF1DC11b+Vh6g/Q+aysHMOvzoc1UjkeGRXCuOSMVN7Mv8yn6j6iviKjiREZHxabh27Tj2iLQjp+uLl64KiQCAq6cGABWXPtuYw+kdfGtwU1mvg10TySRwRKgeRuTsFFgXb+Zj9TX3sc19lnzbCRI43mkZysa8UUsb2Rf5VH0FbDWbWFRpSjSlGlKNKVyA7CA7b7CA7D8O2gNjenr4pCxuPIBowlYySF7aGUrPrWM6NmXCUKzeqKlVTRYAoQPDNGpy7pED6n6OqhgdI0uLh5ODnGXYYuTmnKK5TCYJISCFjuBxRCLoo9Pxq3Z3dNxlZePnYPtYGVjYYxg2KDDzjAIJkt+p3B+5vr+Fbda0xaFpY05Qj72vIRLmZbnKyWNHgzaKCkqCsgAdwk5ExfVTHtEP8GpKXsWug7FD1dhN/c5sZp1IjJi4IbEGT9Ibx4U+TUbF17YTdfl7Mhh/tsOSsDAyKJObi44x/qK/iw8A0otTtQdGlKNKVqJ+HSsELJQi7t+wRk2p2ijyLcC0kGxVNt1WjkoGFFYu3YbYdRm51cW61WRqZpZoY8iMoXibhKoP1R/5W/OpLT7OTS7SDawxwzS48gcJKvONiPo6/zL+VZccyLHMGMems4cEYtGzMi7tUVnS5WyRESrOVh2FVdQCbnN8JhEdZODiLg4UOCjO6QxKgZzydgoCgs38zG1yfqaxs3KbNzJs11RGmkZyqDiiliWIVfoovYD6CljWYg8lIJmOmItmxyqLmEvqj0+sVId/QJhD0enbWVWN609JSFIUCEKUhChs
UpQApSgHwAAbAAaUr60pRpSsOR/3e+/2Nz/ABJ9KVC/y2v7BvFL8TNS/kptKUwXkd/qneEH4oj/ANLrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/r3yzpSpzaUo0pRpSjSlGlKNKVr5aWi4GMkJqbkWURDxLNzIykrJOkWMfHMGaRl3T188cnTQatWyJBOoocwFIUBERANfLusaGRyAgFyT4AH4n8q78bGyMzITExEeXKlcKiKCzMzGwVVFySSbADyTVQfPDnFkmvxMIz4Q524bS1ijiLSVwjL9k2uPLbJuyLxytdptSiPa5IpVzcUl1UOtY5VynFMEjEMcDapu/wB5loi/2DJwTIvlg8i8ifHFVF7fd6fj+Fq9I/EfxX1/JypW+W9N2uPCl+2F8XElEMa2cS5Ez8C4EBAaygrbkWBAtS4xxyxlbzxMT5JvcR3OSucTHOkb5hannjpq7RV1hpZOFs9fbpnfeHRUYvBM46HKpFU2nacN/TK4m7fI0n92aCQzqPvhSxcMDZlAv+Pnz9PWqB2H4yxtR8on4+i2uImpmkBxthkc48d8eRDJDKx43PJbJdAQz+niqLeT3Kux8teRmALThOqwUDcsZcoLJxqxqFvlvbdQy/U7nUyKZBeWCBTiu/ZIwCqoMJPu1jIokcAAKbiBg11uN1NvNriza9FTIhzGx4+ZJWVWX7+Qt44/pbzYX9a9n/HPxlr/AIu6DvdZ3HKmn1Gy63Ftsv2E9ufAnx57YqxSF7MZReWK6hmK3428VBA8xQ+MWdqBRqhiqywFix9yBlYzOeU4l7NMa9mnE8dZCEesK3jHIhHEZARUGRBY4vEx3bA3KqByAffVa93F02yix4IXSWLKImkFwssYbyFjkFgF8+R6WvcVu0Ym7+R+lZ2522zx58DO0SPrsN1jaXX5rRfa0uZikPK8pKj2z4fmVKm1XdUnhxwD5rYbncd405GXnkDP4rnrWeGyXdLhPXadxzL5Dcs3z1seAnmkFCWeHIlDiVmCiK6KA9RyKAYQEdh4+h6x2DAbFxMqTKlgZrSOzOYzIbn7SAGHj7fUD8a8c7j5a+cfh7tsO+7FoMLRYOzgh9zEx4IsePKjxQyqfdjaSSKQmS8nFlZhYFbVNywZAyLj/L+MMS4v4eWfJ61TYYwodr5ESTSq1KuV7GMu0TjZWUrM8oQZWWPXVGfevYZt3aQCb1S+jexS5WXi50OFh4DzcBGjTkKqiMixKn1NvUoLCtN4Gi0O96psu09k7Zja1cqTMyYdWrTTyy5kbF0SaMHggl5Wjne7ePJ9aYKH5ts+VmcT4hwhyFhOPzrGk1m2nZkxpf6ii5yZc2NZYmh4XIuNZtdo5goRlX5lXxQJOXKS64J9JkzB6YyPsA3Wy/Y67KXGMLSrLG6/1HCiwkjNrAA+fJuavOV8OS/GPTB2ruWhm3sexh10+Bl4s5GJjtM/OTEy4wRJI0sY4XRGVb3DCp5cerrU5nAtOsEZmlHNcBAwbyMms0yKbeILan1VVcs7HYJhIEGLRgs3cNVPED0FTICYm6hD1hsmsyIJNZHKmR+4iRSDKfHIr4Yn0A9PP/etJd80+0xO7ZeDkac6fNnmV49el39lZrNFFGbsWBVhx8km4FvpS5rNbxdJ2BzminxNRkrFeq3ExTjJNfKzdu7VU41Qy8Kz9uszqJSMQ1V9ZHpMJQEPT2dmTDDhvIc+BUaaRAPcFrso9PI9R+FQ2x2HY8bBXqG1lyo8DCyHcYkpZRDO4tI3ttYpIR4a9jSnsNfgrRFrxVigIKyx5jEdJxNiimMxFKSDMRWjnCzKQbuWwqtnZSmIp0daYh1FEB7dd0sUcyFJFVl9bMARceR63HrUbgZ2ZrclcrAnmxp/ILxO0b8G8OAykGxW4IvY+h8Ux+A6be/Bu8o58x5iarcjp9J9VLPP4tO8kGcjj6DmV1aJFrzMkY71cEI4E1VURHoSX7C9gAAR2tx8ixzNnFAm1a6sY/N0B+wcj59PUfjVy7vttJ7qdb6Rn7PJ6BAVnhizOKsmVJGBkuI0+0Xa4DerL5Pmt3C4mxVhq0ZxzpCQsqxs2T0WtuyrIpyM3OmmAo8M8BoeIr6rhw3YroRveFKgySILhQQAQE22u2LBwsCbI2MasJpvukNy1+ANrL9PF/A9awsztHZu263TdLzJo31utLQYSFI4+H7iReXOUAFgXtdpGPEfgKXGNcg17K9BqeSamSXTrd0h0ZyFJPxDyBmSsl1FUiBIwz8ibyOcgdE26agAO2w9oCA678PKizcVMuDl7Mi3FwVNvzB8g1D9h0Wf1jeZXXtmYjsMOUxye06yx8hY/bIv2sPPqP4etUIcw+ePNbB/I/kK1wtdcfXvHeA2jW/XDE1vxSrWloKgrMISNHwF+cljXGRFfbcz4lyEY58SgkTsExQOUNa77svYdbtsoa+SKTFxQHaNo+NksB4fx7nk3PE3Ar298T/Cfw/3L4/0Mvb8PPwd9vGONBnQZolEmSGkf7sYchij204p7ycGY+gJBqc+KuRGRJetVuv8qmLe0Y65KYbnstuc30qACjYRwfjWZqaKK+MsiW6RfhIJWNy5crFauzLAYCrJgJ+sPVseFtct4ki3QD4mXA0vvIvCGGMr/tuxN+RN7G9/NaX7P0LQY2wyM74zdsbf9e20WCNfkS/udjscuOYkZeLAicTCoCl0C2ureLevzxPxHhvB07xjxlKTXxlZXr+OMsOcA5Bx4yubnGSGDJiwmmiRUlNKSUhASc0Vq/ICbl4dVdxuIpm26RHjS4OBrpcPDdvezlhl9l4+ZjEJa9iblSfPgkkmufk7tPbe54XY+x40P9u6vPsMEbPGymxxlnYpEI+aRhFlSO6m6RhVX+YetkReckVzhnYbzkvivVsR5Mwxcsvvp/mvNOs4mXtuMMjTDtNq8sibORk37KNi4mFTWUXiGyQuVlwKRJL4dY+RlxaCSTL0yQTYEk5bLPvfdHIT+qxJAAF7qBe/oKmdN17P+W8DC678l5O113bsTUrF1+Ma60GZixqSsRZEVnd5CoWdzxVbszVNjC3JDFfM3DF6ufHWzupuMOa648avZBF5VpOOtZIl00jXDohRWkoVq8UdIuWjsCgqKI96QvUXbVi122wd/gyZGrctH96eQVIYD6/UA3uD+HmtN9z+Pez/ABL2rE03fMZYcvjBklVIlR4S4LBT4VytmR0vYN9pPmvzZ69izPaPPCSwGRrV8jZwd53lMd2Kq3qeWn8f5Rt8DblF3Fau0nKOGZbVAzs7DE3O8MQy64Jq9RFQKcvnePGzxvTgfbJm++VKsbq7BvRibcgSPr/Hwa/ZLN3vUm+Kk7WTPh9YXVrkRywRiOfGhkh8SQqgb2nSNz4QEKLrYrcGRPM3j5cnGb8xlyRxzqfCG2rUjHV9wxxorp4ydgLaklOVzGM5T6nYa6+cLSs49eLryvgkxcO0ehYinQJes0huMCY5s37nHXCmKI8cIsQ3kIVUjySTdrC59b2qo/G/btcvWNd/ZNxP2fXDJyMfM2MnON4jwkyUlljkFkjUBYuZCqbqVv6D
2K4j5AYO4TeTmOScYXeHtsDxxxLM1AruPfvphmTPjt6q3XpCDyXhWSzpwxyTZyt0yuGoJd2UCnMJNza29iZ+FpeofucVw6Y0JXwbj3b24+R/5tbyP41+dHYepdn+Tf8AIz+y7vFlx8rc7BJrMqoxwFUETlUcgBsaIseL3vcjz4r87my3SUvkxeLxepaZsWQ7nPGnpGxvnxVFJB/MPHz2yu5IFElHC671dZPu+g5CJF6iiUSiUA8+yztO7zzktkObkn6knyTX7B4Osg1OLi6zVpFDqMaLgsaiwVUVRGF82AUA+oNzb87o8XDhI4EFQ26KaiCfSYpykTP19ZSCAGLsPeG7Q7e3cBDXRcg+tSXFG8gepv8A/b0/CnezBe77kRtjGVyC/iHL2tYyq2Oay1YwbaHk2tDpEcgyqK06q1YNQm3L6PddST5ZVy4XIX1zABSF1l5mRkZIjacgssSoABYhV8Lew8+Pr5vVd65qdTp3zcfUJIIp86XIkLOXUzzMWlCcmPAKwsUAVVJ8A3JLM6xatVfZFVEyqgmoomCyYpKgQ5iAql1FOKSgFEO8TFQhTdI7huAD8AaeR6V8OitYsBceR+Rtbx/pXol44Zi5rYo4e48NjaZn+NXHDiVDzuaM8ZDw3knH1jtue08nZXq7StQ1gx+xlAma3awZy4RTIZc6DQWbM4r9JwIQ2wdbmbnF1Ef7ctja/FBkleN1LS85AFBQG4bzxHKwsPP4V417r1z4z33yHmf3tItz3DfyJh4OPl408cWD+1xZTK8eQycJIuSe6/shn5uOFwSR6HvMo8o/AvmUY3i+RtNXDDHIF9jxhkJpfgjVpNG/QLigNZiAp+Sohs5EFl49kg0btpBkAuGhSHIBFyGAutg9j6pg9jxxsYj7OeYw/L15DhcK4/6AEeR6ea8hfC3+QXbPhbdP07Yj+5dSTMbHMHIKYHGQySS4zEeAzF2aN/te4N0bzXmG/wCXhwrKZO8zLHdoA0tHR+BKte8ny7pvEKvGXjkoN3TIuAmXZjokhAlnVoU7pQ/UcyrfoKQRERLrT4+wnyeyRSnkqwKznx9bcQD+Fy1e4P8AMHtGPpvhTM1/9N5tpPBjKpcBrFxMzqPPPgIvIHgA3J9L+rHMVhp7TP2ScesJyuoWyMlBmF6YjJsSz7KIl0ivGEmpB98D9JjIJiZVJTo6VCiJgHbt15Q+WdNka75C3ATGeHBbNdo/sKoVYB+Sn0INy1/4mtU/Fs2Tm/GWm2czPKjYixtKbkF0JQoW9OS2CkeotalFH0qZmH8NHVamzjhc6rSSGQl0DuGLphIlbAxVkUUUTM2cSkqmqIqdRhMQ49W3TtrHwOrbLbZeLh9c1eXJIzJL7syl4njkC8DKFUxpCpDktckqfuta1dWd2jW6vEyszsOzxUjCvF7ULBJEkjLcxEWId5iCgC2ADD7b386y0QkFBRiaRJuLk7KvIFMshXnTh3ENosEXArkXO4bICm+Sf9BSFKJiimAjsHw4XYNRp9TrxEuXj5G/knBZcZ2eFIbNyDFlW0gksAASOIJsPrIdf2u32+eZGxMjH0SQEK2SqpM0t04lQrNeMx3JJAPKwufp2Y7Ws8bKDZatHrOHFQWZ2CUeNRUMs1hyOAZOUFG5FiC6ZuzOQKoUhDK+jbYN9ffSJewYGzO+67A0k+sePJlkW5ZIOXtupUEc0fkAwUFvS1heuvvEWgztf/YewzKkOzV8eJGsFeYqZFYMQQjpxupZgvre5tUgskgELlN84nJy4w9JyC1jLidSjN3RZM5VGncNG0mmcxTILNhBTsIBhDr7S/Q3X3y2q+RJp9vmbTF6lu44s4nXq4lN04osoJurJ936b+vlfw0x0bltPj6GDU4erye16aSXCAz2UxCz8naIgWZW+39Vh9vhvxK/eE3EXe8ewTkbOL53HNqva7bGDKhIRbxZn/8Aw/YDC2BVk0SOU5hVXOYiZyiPSAba50nbkn1256Np5DsDNJGuJl5kRm9yGRktjZJ4AxopDHnIxVWB8AAVxuepvDn6juu3QYHsxSNl4mHL7Xtyor//ACcYc7O7Cw4IoZlI8k0pyDVcdyjOxJRVFmrHI1luxqrqlLKvXy91SEWktIM4QFzwzZhFGOPeKAJFFQIYA33EAnl/470jZRb2LG1GXvZ9eseG+ATJI2ev2TSJByMCxwk/cw4s4UgepAgW/wCQ9110ujkyNti6SDPaTLTOUJGuCfvhjkn4iZpJrfap5KnIE2sCZP4usytwo8PNuHqkk8N4hlIyJ4pSGK7ftFjEXWRjlO1BAwCAFH0G23DsHXob487A/Z+pYu2nlafKPJJZDCYA8iMQxWI/pU+OP0PrWge/6GPrXbcrVQxLBi3V44xKJykbgFQ0o/U34/UXsaj/AJYqNETzHQZZ0WKVVlJOQf35pJyYrkNGsotJZi5WilVh8O37lucQ7sgAcS/COtKfJHWunJ8paXZZP7ZpcnIkk2KSylgYo4QY3MJb7VsrH7QORA9TW5fjnsnb3+MdzrsY5Cx48EceveKKx915SsirMF+5rsB9xPG59Ben9xoZ84gDSb24t7mnOvXErEvWjbwTJpEKHFBoyYNlCJuCtkQR2EVA+q3AOzW5+hHNm0pz8raptUy5mmhdF9uNISSqJGhAbitj+ofqvbxWnO9rhw7gYGJrH1cmHEsMyO/OR5gOTvIwJXkb/wAp9CL0q4ufhZpSQRipJs+Vin68ZIJImN1tX7cAFZscpylEyiYGDfp3L9PVk1+61W2eeLWzxzS40zRSBT5SRf1KQQLlfra4/Oq9sNNtdVHDLsIJIo8iFZYiw8PG36WBFxY/S9j+VYNvha7YK5Jxds6AryyRFZI6j1SOTSRbqFVKqo9SURO3IQ5QETdQB9HWH2jU6PdaLI1/ZLf2RlBlJkMQCqQ3IyAgqAQLm9ZXWtru9NvMfYdcud0rERAIJCWYEWCEEMSDYC1/wpuMPsq9XHV0pUBNy0yzipVrMtk3zM4RcXHzqHiWjKFlTrLll25ibGMpv6fSHbqifGGLpNFk7XqWly8rKxsbIWdRIh9qGPIXkkcEpZhOlvJa/rY2vV6+TMrc7zH1fa91i42LlZGO0DGNx7ssuO3F3niCqYXv4C29Pr4ra5Cm5CFlq++LjJ5eIyNTcSAS0WZutKQMoc4M25GDBYSnVMumbc5y9hCb/Q1Jd32ubqdjg5a6CXb4GOrSe9DxaXHmJ4KI4zYkspuzD0UHx4qN6XqsPa67NxG38Wpz8hlj9mXksWRCBzYySC4AUiwU/qa341nPm8wFuqkawpcEtSHUdLSE7JrMmhHsLLrpfYEmyRTEBJZ8B+lYxUzGN27mDWXmQ7T/AJNrdfh6rDbqMsE0mRKUQPBMw+0IBYBpL2chST5uwtWJiTaz/jexzszaZi9rjnhjgiWRyk8Kn7ixsbiO10BYBfFgb0qWytZrbcsGxNFxSUayO7RhWqiCKqLLqUMKiDIDAqYqigGAuwD1G7A7ezVjgk0GhgGowzjY6QQlxAhVWWPybiO/Igm9rep8DzVfyU329mO3zBkZEk8wRp2
DMrSeBZntYEC1/PgeTau+Cno+xQzKeYC4Tj3xDKIjIN1GDgpSqGSHvm7gCKIj1kHbf0/4ddun3OFvNVFuMIuuFMpK+4pjawJX7laxXyPF/Ufxrq2+nzdJs5dRmBGzYSA3tsJFJIB+1luG8H6fnWOxq0JGz81aGbZROasKTJCVcmdOFU10o8nQ1BNsc4oNxIX0iQodXw66cPrupwN1l9hxY2Xa5yRrM3NiGEYslkJKrYfVQL/Wu3M7Dtc/TYmgypFOrwmdoVCqCpkN2uwHJrn6MTb6V2S8TISDuDcMp15EIxcj4yQaNU0FU5tt3RieznZlSidJHqHq3L62+vvZ67NzsrDnxMyXFhx5/ckRACuQtre05PlV+t1818a3YYWFjZcOViRZMuRBwjdywMD3v7iAWBb6WPit6P7N9S49KiLWpu7kxI0mKxcyp2yScwjoYpKBry5fAu05w/cKyEwwP0lcoRoev1bgJNUjtWGmLtNf2oLssifEk9kY2Mw9txkHiZJ4zYMsX6r3+2rt1fMbJ1mf1ctroIMtPeORkqeaGAchHDILlWlPi1rGlEztMW/ss3VG5X3tWAasnj8yjJVJiKL8N2/hHpgBF0cA+qKXtLqbxOxa7M32X1uETf3LCjjeQtGVjKyfp4SH7XP4gelQmV1/YYeixOxTe1/bsyR0jAdTIGj/AFc0HlR+BPrWyl5NrCRUjMPQWFnFsnD90DZEzhwKDZMyqgIIE9dZUSl9UodojrP2ewx9Trp9pl8v2uPE0j8VLNxQXPFR5Y2HgfWsHW6/I2uwg1mLwGTkSrGvI8V5MbDkx8KL+pPpXVBTLOxQ0ZOxwOSsZZmi+aFeIHaugQWDcgLt1PXRU+iUe0Nden2uLvNVj7jB9wYeTEJE5qUfi3pyU+VP4g12bfV5Wk2c+ozeBzMaUxvwYOnJfXiw8MPwIra6kqjqNKUaUo0pWjmlLGQ0T7vN4twU0ogWaGTVWTFGGEpvErMASAe8fFNt0FN6o6iNs+8Q439kTHdTkqJ/dZl4wfzNHx9ZB9AfFS2qTSMuR/enyEYY7GD2gDyn/lWS/pGfNyPNbwQDfcP8AiAb7ft7b6lz61Ei9qNKUaUrRxsXIMpadfup13Is5VVqpHRLhJEjeCIgj3SyTNRMAUVK6P65hP2gOofB12bi7LMzcjMlnxchkMcLBQmOFWzBCPJDn7jy9PpUvnbDCytdiYePhxwZWOriWZSxacs11ZwfC8B9ot9K3mpioiuh0gDps5aidZIHLdZAVW5hTXTBZMyYqIKBuJFiAbcoh2gOunJhGRjyY7Myq6MpKmzDkCLqfoRe4P0Nd2NKcfIjnUKzI6sAwupIINmH1U+hH1FLXBMjWpLHUWNWXsTyNYOHsSo9tbddvOunkeuKTpV34gAVWJ3htkz+gxADbVS6BstTsuswnSyZkuDAzwh8oMJ2aNiGLlvLefRvqKtffNdtNd2WYbiPDizZ1SYpilTAokUFQnHwPHqPoaeLVzqnUaUo0pWHI/7vff7G5/iT6UqF/ltf2DeKX4mal/JTaUpgvI7/AFTvCD8UR/6XWfSlWs6Uo0pRpSjSlGlKgzFfrKrz/cfxf/XvlnSlTm0pRpSjSlGlKNKUaUqOnKWAuF8wpkDG+O5eowtzucEMEyd3+sntlMNHSbhFtNsp+EAyZJBnJwpnDcSbj0ioBth21g7LHny8GTHxiiyutgXXktj6hl+oIuKtPSd1p+v9rwt1vY8uXXYsvuMuNL7ORyUExtFL54MknFr/AJWryLV/iRhjO86fH/D/ADXjCwcr6De8m2K+VLNuDFsdSUlAwz+EYRS1RTI2lY1pGY1mUQPCvVFiGOkcpxTEdt9OxaTX7KX9rosiF91FLIzrLD7ZKgqBx8EARn9Bv+dq/STO+Ue3dJwRvflfT7LH+MM7Cw4safXbEZaJJIsjOJyWRy+XGbTxhTZgVDWvWs40YDyNkDJGdvLaz/fc94Dv0hfJfNdDnKM/POxMnPW2vrtbp8ZfsIWLCYrN7gGyUm38S8aEM46UukDiJR+NRrMvKysnqW0lycbKMplRkNwWZbP7lvBVx9wuQL+PWsn5E7z1/R9e0v8AkJ0fC0e80iYKa/JjyVEbrFBKGx/2nucmjmxpCYW4I5CXa/HzU0r3xmguBOH6ngLFWIeUuWc3lJa7zR+Z2GsRxlincQ2C2P2TOxtK/HrSijSJeyddgSMl253Jt0VhUA/UbbVgydPH1nBTV4UGZPsfudMqKIM0TMQGAF7AlVsRf0N61BpvkXO+b+2ZXeOzbXrWr6b/AEMfI0GfnPFHnRQqzRNKwQF1SWQyKwUfcvHjYXqDuTclZ55tc3MbUBeCiGT2q16hx9WwbzDr6dOs0lGlqzcMyJKuEo1RBFzfI+PcS7xEHSJ+7OkZNQp0g1XMvM2fYewxYpVVZEQLDlLwYjj/AFRe384BYi48EWItW5eu9e6R8O/Dmw3iTSvBkz5LTbHQymeFH94/sCAXBIxmdYI24MLhwykNUvcPVPj/AOWHa+Y/JKFxLyZtsXjjIlbwzRYZeNj0a49hMieNk1JLHkg2duE7hWWj2KIipJOiFVRQMkUhDHHqGdwINZ0+fYbaODMdYZViQWHG0lzeMg/coI/UfIHpWq+2bTvX+R+s6l8e5e165i5OwwJs/JkDsZVkxeKBMpWA9iUq5YRISrMGJIAsHdwvx95k3HM3/wCet8zXIcasZW9+9ypcOL91vs7bkK9RV4N2ysjlJdZaHq8FGP60qD6K60Uwi1lSi49dLtz9fq9/kZ//ACbIyDiYjkyPjs5biliGP0UAr5X/AMSfPkVVO396+JNT1H/9SOk08fYux4sa4UG4x8aOAy5IkDRAgB5pHWUe3NZiZlB9vw1eWAbzHQeYZJRzL2uUxojlx9NSaqTqKf3aerDW5FkvEKzpzCjI2B9HsUTGUFx4VVYN/qR3HS/7lY89iWc4gnJPkF2Xnf8AV6FiAPra9fpf/ZZ83qcYjixYuxNqljQcXXHimOPwsIh5SJXZrDjzVfHqKvzrnnvY2+N7I8VZ8Mz5eINkx80rtQxLE1yitLFF2d6Qyd1f2Rdiu3jpSDs6DhdPw6YiICoBjBv1COzYvknE/fypNjt/YniCrEFTkGP6yxFgQ1z4rw9sP8KOw/8AFNfk63bwH5Vx89pZ815ckxPCvnHWIMC6SQkKeR/Cw+lTZ4ScyMfZK5Z5NjMbXh7S+GUViHFuOMCUmxGiKjjNplwF4RabqmPmr4zZy8t7lq9V6mZBUWUKCpwJ09BhsPXt9i5e7mXEkMfXxBHHChssYl8clS9iWPn7fX8q098xfEu9698Xa7I7DhpmfLsm1zMvZ5EXuT5bYNpBHPlMtwsAKraQ2UHiCb3Au+OdNM5U1FE0lVBMVJJRQiaqpiBucqSZzFOqYgdpgKAiAenWxPw/GvGwVmBZQSo9SB4F/S5+n+tJWJvtJsExba3XbZW7FZqGqi2udahJuOkp6rvXTY7pkwsEW0cKvIZ4/RIIopuCJmUDtDs10JlY8ryRROrzRW5KCCVJ8gMPUE/S9SeVpNxg4m
LsNhi5GPrs4EwTSRskcyg8WaJ2AWRVP6ihNqQ+Bcuvc4Y4ZZCfYuyLh5Z5N2GGClZTiQhbagSAkTsCSqrIoAX2bMAXvmp/8tIQHWPrM5tliDKaGaAlmHGQWb7Ta/8AA+o/Kpnu/VYem9gfQQbLX7ZFhik/cYT+5AfdXlwDf+afpcfRqce1Q8tYqtZoGCnXtXm5qvzETD2iNbkdSFak5CPcNGM+xbHOmmu7h3KxHCZDGKUx0wARAO3WXNG8sTxRsUkZSAw9VJBAYfmD5H8Kr+sy8XA2eNm5sKZOHDOjvC5IWZFcM0TMLkLIAVJANgb1Bi18FMITdAxlZ+Z1sk8327BNDsldm85ZGmnNGj5mFn1H4y8xc4ttKlrSXct5ACJquFDdyZMhwNuAAFcn65r5caGbfu2TPjRMpmkPAEG92cX4/Xxf09a3Rq/mnuOFvdjrviPFj02r3WbFLHrsSMZDRyRBeEeO5T3jcrcqoHIEi1r3jRC0mt8TsW8rILkVYkbd5ak5E4wo3HHHUNZVMmSURRLf0oSyIsGQpzoN5ewzBXpHR3K4910rEMBChqIjxodLh5se1f3OossaQIG9whG/V4H3WLG97nx5vWw8vcbH5P7L1jN6DA2J/kNDLmZO1ypIhiI+TB5Q8mvHeOKP2ygRfuuhHImq/eS/NDD0ThGhcWuHOSa1gaqYJy5Yau9VyDlW2R8zYqpAEBWKn6Va6J7edS9VmJGafCqjIKFW6kUhKTYvZWNvv8BNdHptFKmNBjTsp5yMGZF9GRk5XQlj4bz6eK3p8d/EXbMruOd8lfLGvyd3s91qoplGLhQtHFPLcPFkQ5Ptqk0axx8WiBX7mufNedS+gxh7bfK5Vro8uVM98JFRnYEzvWjC7ox7x4nE2t3GuFAFVy5RdKnSO4KZdMqxu0OowDqnJ4pPLFDIZIPcNm8gPYmzEf8A3+fNe/NGZsrV4Ow2eGmJt/2ihojxZscuql4VceiggBgv2niPwFSi4Hcvsn8R87U+yU+5t4Kkz1hionJMDZXkuegv6xIPGrSampqIjVBMaUhIsDrNHSSZnBFEiFDcgiUZnre8zNJso5ceQLjswEgYngVJsSwH1A8g+orWnzb8U9c+UulZev2uI024ggd8SSJUGSsyKzRxxu4/RI9ldCQpBJ8GxqPnNd9B5ZzBmvMFan29ozrZc3OLFH5ZpjQ+PaUtTa00WYQMxSIaJGPfEs1sUSbOXbp6ggqkdqVQphWXWNrnbbGHJzZ8nl7mc2QSJUHBOC+FKgWPI+CSQPS/qTXPx903Y6PrOq0ns/tOrQ6dUfByG/c5AyJCGlSaVuSe3FdlRUZgQxU/aiipA+Wfxuz7mZNHl3P1qb5V464R5ikVblhEtosy+V0UnrZrkuQn8Kgwk0XtqvkvaGAiWNWE6ThwUTqAYFDbWHrWvzs5RuGVsrFw5btHc8zf77x+fuYken1Pr61pv5w7f1Xq07/HEEsWg3vZMALFmGOIYoKk4wjzOSFYoEjaxkWxVbBSOIvJD/mJuXjPIKPFfA1ArEjievS2MkuR+T8YrRaNZlou45SdGGGr95j4B6lAv7FBt4ddV8is1Fy1kVDGMfc2wSXyDtlnGLgQIYozH7zpaxDOfAYDwSLebi4NUz/Dv48l1Mm97Xt502GZHnHXY2SGMiPDirZ3gaRS6xuXAQh+LRgC1hXl/wBazsK90Wpb40rJLpkOkVFReLapWW0QkIs6nLFHVGGat5KQQarupW0S/wD2ZAR7ZFQx1nS+6aJCiYQHbbXfjRCbISLwAzAeSFHk/Vj4A/E1E73OOs02VsVEjNDA7gJG0rkqpICxJ90jG1gq+WNhTp5TwVk2nxVlsjGMfXXBNLvk3Q4fMlSLL2HDzqxiuistEQN+Wh4iLkpFygRJTuegiipQ7wpBJ62snKwcmFGkUF8FJCokW5jv+AewBP8A3/hVe0Ha9Hsp4MOWRMbtWTiJO+JLwjyxH5AZ4A7sqg3F7kA+Cb+Kj4ZDpbJOe+QMCqqqQolU3cpCkBBA6yQlASprAf1BARAekd9uzfAANr1cle8hQgiwBv8AQ3/A/iLef4iuoClEDiY/RsQRKHSIic24ABdwHYvYO+/0tcV9Ny+npXs/8nXlVwyxr5efIK65jxvx/QythujBK5Zq9cgYdS4Zrw9GLwq+NXN7Ush5GqT90sd6lFWaLYT9ZHJEFFkETCA63J1DaafG6/kT5kcH7qFLyBQOUkYtw5XupYsSLfja4Ffmp/kX0T5J3XzBqNX1zN2x0OxyuOLLJI3s4WW/MZAgEYWWOGOBA5a1ivNUdhVimOf+YR8syx4kSvcxkOzYtmYhgmmthmZo0u9u7Q7Jqh3UXXPdtq5qks0STOVJuoR41SHuzF6SFKG9gxu/dbkxPfaRonUf7ZUlv4Cw4kf6j+Fac3H+I/zZh9gOqgxIM7HkckZazosJBJu8gkIlUnyWHBz5vck1uOJPm7eVHmPMbXGuCzxuKco5SfJxrN7N4UicUhkKeeO1HTaDdWmISOnJTMi/MY6Cb9UgLujgUhjLHKU3Zqe2dWzMwY2FaLJlNvMYTkT9OQ9ST6X9T+dY/wAgf4+fPXXOuHddp9zO0eCvIqmY+V7CKti4ifyqKv6jGDxQXICgkN75yHljnznWZLmVxdj5irc3cQoxtjbTFVknTGRyVVKnHqtnMAq1O6TYBZIiHJ1s19incItxan6iGANR3eun426wpc2CINsAo5A+RIgFipU+LgWsfytU7/jZ86y9P2cXx93GVJPjjOLRgSAWxZZW5CQMAW9t3NnXyFLcx5Bqknij5xWNOK/GaJgpfM/KXNkzkF4/iJDClpdwsgXjwMWuzdzcpAZFkJVpLW6Guz6SO5jQ7luo0Ikq2VSAyfeK6S30+1PWBrOs7OfHSSBoDikFY4gWVjIkkZDq3i0dlAAZgw/H17vfg3I7d8hybTY6XSYsOI6zLsIg3PPJVkWOXGKMiGIC012fm3F0ax4r6DML3yF5s0lpkDDTuoOa4eoI2duvCNFGi9keR+0a6bSvfHVXZWxocxkHSKpkykcFN1AAj1a09HoOzfJmwzdTi4mDgbTWYQnkjjQo2W8f9MOzkm8rqf1ErG17kAm9aq7A+v8AhOaCPsU2wyEy8446tK4YYqP/AFLKoCgwAi62DOFsAbeKdPEk7daTVLbaq28rLBgd02SdjI9y6s0gtDeGXcRdfi1DbuSuG0mAqnApgJ0b9m2+uz4z3Hbepdb2fY9FLr4cQyKH93i+VIYOLNDjQn9QZZbu1iFtfxa9QvyTqeqds7HrevbyPYTZgjYp7XJMWMT81WbJmH6SrRWRbgte3m9qfpW65JsEqQ9fyJBGQhYOXvsitHMUXrJjXHbdutF1+bi0CieRl48hTgIpnH7KIgbbYNbmk7X33d7ENpd5hmLEw5tjK0UayRx4rqphxsiJReWaMBgeJP3Eg+laej6r0XTa4rutJliXKy4dfGsrmN5MlGYS5EErG0cMl1NmUfbYi9R9G9RtZt848hpB3aKNemrU9khzN3UUaTXfCASrQEW3SqwfRK51DtgAR
9QxQEBAdaTPb8DQdnzMvVTybHqG4RDlQFXh91pP95OK/dHJAxZohc+CB5Brcw6jnb7reJi7SGPX9t1EjjFnDJKIljv7L3b7ZI5lCrKbD7gSCLVlSzCHY+Pk5eFjHdZsJn1eq0RVR7+TqESg5aSg2M6bQXqaMqDdyUF2q5k1llTKdQl21lbPD1WGJs/Z4mPL1/OMmNiQ4f3S4UKskxyiE5hZuLASQyFXkcvyK2rH1mXs8sw4Gtyp49/hCPJy5sscYs2ZleL9qC/AtDyUmOaMMkaBLBr050JYnKcqwgyXi8DYQYMsc1eJimyJ1Ypo8atnTa8yUeZwVumcWKobth6lEekR6/VENX/U7ydNjDp02+3O7EMesxIYUUtCkiI6bCWLkFB9s/7Ru0dr8vBFULa6SBtdNt31Op/snvPssuaZiFmdHdGwIpApYjmP93wr3tx8g0vmmNHl1yJYJA7hCWqxGDCnXeUn2jxCxzTmKZot5JtBgZMvsls5dIkXK5QPsbcxQNsO2rpj9Byu194zc5pEyeu+zHg58uQjrlTvCirKuPcf0UdwsglQ2P3KDY2qnZPesXq3ScLCVGx+wmaTNwIsd0bGgWZ2aJ57E+86oxjMcguLA2BF6eahYpqeOXj5xBuZZdy+R8KiSWlVHvgIwinfEjo9ubpAjRJbcwG6TG3Edza2p0342630XJmn1EmS88y8VE0xk9uEG/tRqbWRT5vYn181q7uHyJ2PvGNFBtkxkghbkxhiCe5KRxMkjebuy2FrgWtYUmFbJXGOZWsPP1ZaGn3CJ2lGmmKqi6NkZyRBPMPpBi0KmiyBu4Q7vvlwOYw/CGq9JvdHhfKker3WubF3LqU188bFlyklBM8kkaAKgVl485ORP5VYI9Hu8z4vk2en2C5WmRg+fA4CnGeI2hSN3JZ+Sty4R8VA/E09UlGsJhg8ipRoi/jn6B2r1k5J3jdy3U260VidnUQ23aGtsZ+Bh7TCl12xiSbAmQpJGwurqfUMPwrVWDnZeszItjr5GhzoXDxupsysPQg/jSfgIiwRUrNJuHsWNNTbxTWnQjBiLZxBtmjcEXKDtx0gDhNQxS90G49JQ21C6XV7vW7LKimlxz1gJCmFBHHxbHVFs6u/8wJA4f8AiBU1udlpthr8V4Ysj/k5eZ82eSTks7O3JGRb/YRc8/8AyJruXutUaWQtSdT8c0sh2aT9OKdLg2XVariYEjpHW6EVTqdA7EKYT7AI7ba7Zu2dbxt8Os5GbBFvjEJBC7cWKNcAgmykmx+0Et+VdUXVexZGjPZMfDnk0QlMZlVeSh1tcELdgBcXYgL5Hmsit2mv29gpK1uSRlo9J65jlHSBViEK8ZnAjpuILJpmEyRjAA7AID8A67tF2LS9nwm2GhyFycJZXiLqCAJIzZ1+4A3Un8LfnXTvOv7nrWYuu3sD42Y0SyBGIJKOLq32kixA/j+VNreKJjS1ZDqC1ndrpWsrFwvCxDZyo0CYbRS4vQcujIpd4r7MWLuUO8IAhuGwhvqhdu6d0HsndtZJv5XXsghZoIVYp76wt7nJyoufaYXA5AfSxFXvqfbu99e6VsotBEjdcMyrPMyB/YaZeHFAxsPdB8niTfzcVvlJivXqpvkrlFSFVins0pBGY2ZwEG6fOGrootFWawKoGFJ+omBkAKPUoACAb6mJNrpO49amj7Tjz67WTZZxzHlP+3eRlccCjXUkSEXjsbsAbA1DprN11HsUUnWMmHY7GHFGQJMVf3Cxqy/eHFmF4wbOSLLelcu6lWczCxTOEM5hFmjsJCa8YmT2QdmkQrJsZofdZ2Lvbp6gEOnbcdWebJ2OLtMTXYuIX1Lxv7k/MD2SgAjUofufna1x6fWq3Dj67K1mVscnLCbVZE9uDgT7wckyPzHhOHrYjz9Ky5eWYwkc/lJBXobR7Rw+XKmXvXJkGyYqKig2KPeuD9IdhSgIiOsrZbHE1GDLsc1uOPDE0jW8vxUXbio+5jb6DyaxddrcvbZsOvwlDTzSLGt/C8mNhyY/aov9T9PNaV7KzcnAxEvSmzB0pJLRboU54XLACQjkxTvFe6Avepv025tyJmDbq9OojL2W22Gmxtn1SOGR8h4ntkcowIHsXNrchIFPhT9alcXXanX7jJ1vapJo0x1lS+PwkvOgIQXvxMZYeWH0pW77D2CP/m1Zf4VW/Uea40rmtDILWFKahgYoRA1wxHvvC6eOFEZBscEy+A9np9iKhVFNwU6x7A9GoXNl3ce1xFw0xf7ERJ+5eRiJENh7ftj9JBP6uXp9KmMKLSPq8o5bZI3gMf7ZEUNG4ufc9w/qBA/Tx9a6JNW2FsNcTiGsOtV1QfDaXTtZQko1EqW8b7JSIYEliqqjsp1AOxfRrq2EnZV3mDHrI8VuvMJP3buxEqfb/S9lQbNc/qv6Cu7Aj642lzX2cmUvYFMf7VEUGJvP9X3iRdbD9NvrX3Pw8rKrwSkbY3sAnFy6UhIotEEliTrJMolPEOxVMUUW6pu0TF3EPoa+9zrNlsZcOTX50uEmPkiSVUVWGRGBYwuT+lT6kjzXxp9nrtdFlpnYMWZJkYxjiZ2KnHkJ8TJb9TAeAD4pRD/i3+D6H0tTp9T+FQlqNcUrkPTpSk5EWBSVlrHFmhJiMLX3bdqSSkG4JMJoF0hVFxEKgO66CG3SYfgEdQes3Umx2Wdr2xMqBcKRUEsi8Y8jkvLnAf5lX0Y/Q1N7LTJrtdg7BcvFyGzI2cxRtd4OLW4zD+VmvcD6ilFqcqErUy8sSNbKFR8K5mF2rw8LDqvEWjiZetm51iMmgqm6jnUEoAIlAegB3HUZtNkuBjssPtybR45DBAXVGnkRSwjS/kk+ASAbA3NSWs1zZ86mX3I9YkiCeYIzrAjsFMj2FrD1AJFyLCuuuP5eTgoyQnoYa9MOmwKyEILkjwY1x1nKLcXSYFIv6oAPUAbduvjRZmzz9Rj5u5xf2O0kjvJBzD+01zdeQsG+hvb6197zD1uBt8jD0+V+91kclo5+JT3VsDy4G5XzcW/Kt1qWqKo0pXW3irnJWKrhAsohetEkVffN2/cLIv2jEEetoMOmT7Gu5UXDY4G3AC6gNpJ2RNlgLpo8Z9S0rfu2kZhIkfH7DCB4Zi3g38Wqd1cfXX1uc23kyU2qxr+0EaqY3k5fcJifIUL5FrEmpJgAAGwdmp+oKudKUaUo0pWHI/7vff7G5/iT6UqF/ltf2DeKX4mal/JTaUpgvI7/AFTvCD8UR/6XWfSlTZybyu4y4WsKVSy7n7EOM7QswRlUa9eb/WaxMqRjk6hG78kdLyLV0ZosdIwFUAvSYSjsPZpSo2Zz5U3+EtnDW7YDtmC8iccOQOdqhhm2TCZZ20WOUC3JW05Z7Hdrrdja1Bu0iVK2CaniUHwqKicoFLtuClNbl3mDyHxTzRlKBfY2HxTxDby+Dq7UcxzmBL3d4K82nJbZw0nKxLZcg8qQ0DjiRJa1WkXHqu668aAu4L3yu5ihpSnv598obrxgpuIZKme6UUOTsxRWNZ+9XGsW7IjDHsM+q9pnveBrivHb+Pv+RX7qSgW7AjOL
U6mwOxcrbIonEFKXXCzkRYeRnF+mZyvMZARUrMDdSP8A3OJKrw8pH1G1z8AyscdAvlH1nrylji4ZN8MI/wCuVjlFxarl74ghpSo84LzjjzOnmLZUmcdObU5Y13hriyElBteOsiY4ckkFM35ZdFKzYZFq1VfyjXuR7XDVNZuBvV6+rs0pVoWlKNKUaUo0pQI7do9gB2iI/BpSkvaLlW6bCyFhscxHxMLEs15CVk3zxu0YRse1IKjt+/eOFU27Ro2SATHUUMUpSgIiOvh5EiQySMFjUXJJsAB6kn8K78XFys7JjwsKN5syZwiIilndz4CqouSxPgAAk1WNybuuO8Pe/XmKOsp2SxVuq4KXrVTore8ilhC7EdyQyFWko1qzZyLcbTZpV6kzSkkxUIVJQBEOku4V/Zy4mtWTtLSs0aY3FU5/0XubqRYH7mJADea3F0fA7B3TIw/gaPXY8Gdk7oSzZJxr7DH4pxmRizIfZiRWkaE2JYH6mqZchZA5FcX+S0HzZPR8XXjJ/LinwUmbFUTRrVaLPg7CkPD1yRs6ths1bcMma8hE1lugmZVAintMxSHOTYu2qJk5O11G3XsJjhkzM6MH2wjM0MICluTLYXC28gfcbGvVmh0XQfkn48m+HlzNlh9b6rlyJ+9fIghh2OweSVYRFDKGYK8pY8WI9kEqp81UxyE5GXVvyQzTnbixI8hqhVJg0jXnuR7dLTbexy7a1yUiRg+sajhixCBjTR7wWMTGOu+8ACIHIoCgAJaRtNrkDa5Gy0xy0ha6mRybnkTYt4HEWNkU3428GvUXQ+gaiT4/0/SvkuPQ5W0h4SriQRxmJDAiclhAZvdfmvuTypx9zlZl4nzYDT+MfO3NfGrAHKnCefc65kv13tbKOudAc5CcUupVmo4wM6j2sas8QsBSzK8jM18rdV6USqLkW3UTMJhPqzwafsmx1OLudflZORlSSAOnucFVY/AF+Xm5Wxb1INaM2vyN8KdP+Qt58Z9x0el1Gjw8Vnx8lcUZE80+ZZmcKYj7YSOUssZ8KV+1gLCosTmZrvEZPzBl3MGO4F1Zcb58uJOREfc77HI59UqeW6/7o2bG9GqgTrGPbwcJDGVZIykYVV62Osmr1lANghpdhkpmT52fEplhyW98M497jKvFo0XkBYDwGX7hcGtl4fUdNldc1XVuqZ8y6/YaOA6tsfGc6wT4MvvxZeRP7bM0kklpGhl4xuFZbEmmnn+VELiHJBAw28tmGfYuPXbpGSwHlK4W+MlMoSkAyRqhrS1ylLO4aUg6vDrCzfEboCsm6OodE5xKUAwpd1HgZf8A8Avj8YvWGR2BkKjjy9wkEKPDWHg3IvVnwPjLM7V18/8ALUxdv72eqlNnhwQOmGkrGf2Ww0WRJJpAHjLNxKBQ4AJNKd3zD5KEzvjPPPJObaZtCZ46OjL4+JPoVChTeJ73DzFXRhL00hm7RBRB0MgZ9KItklnqjgC+uU4D0dx3u2Gyh2W2YZHLFP2X4oYnBWz2+nnkwAJJ/Oo6P4o+PD0rY9J+PYX05h34tle0Z8mPNxpI5jJjNIWII4iOFnKxhb+CPWraTVbLyUisxZN41itIPVmcczXcuWke0VcqnasWjl6dR44as0BBNNRUxlTkKAmETCI6pj8TIxQALc2AvYC/oL/T+NelcdZVx40mdpJhGoZ2ChnYKLswWyhifJCgKCbDxasHXFd1Z0dJyUQ+j5SKkHsbJRL9tKxb5i6XbOo6TZqkXaSDJZE5DtnrZZMpiKk2OUQDYdfSO8bB0JDqQQR4sR6EfmK6cjHx8qCTGyo0kx5UKOrAEOjCzKwPqpBIIPg1eZwM80GVmuRWKFOfGSXVppeOmNxc43ypOkfNpXH9znmJU1X9sWqzMq9vhJdigLICvUFysTCRQgb777F613KSTawf8mmL48Qb25G8FGb/AMuP6gR48jx4rxh82/43Y2H0DaD4P1y423z3gGXhR8SmVjxt4WATNaCRGPuH22UyC4P0q9Zzz48sbDFXy9yYx5kbGErZshSCTq8JY/K++M7Ldsgmhm8OzcQEogzkFHJEDimk8XRbsSEERFURDYdkN2bp+vhn2+LLC0spu/C/uSsB4HE2P8CQB9b14rj+Dv8AI3t2y1Xx1vdfso9dgIRjnKK/s8GGRryMJULKAT5Mas8hNgF81RZYfO6u1RytnXIvH+gWBEuapiIfCzzxeHd6iKU1hIVeOZFoNLgBioipLqO3Rl3JBdyDdwKSfUUA3DWt5fkTIgzcnK1kT/8AyGBtM5cIFFhwRbBfJufJB8eK9p4H+G+m2vWdLoO9Z0BOoideWtx1xnyGkkDscrIl5vOAo4qeETLdrGoO5o80HnTnmPSiLtnuxxMKQiJVofHTZhjdi+O3di9bOZA1TRj3rp0grsBT98XcgAUQEOzVez+49k2S8MjKdYx9IwI7+b+eNj/61uXqH+N3wv0ic5Wn0ePLmEm0mUWy2W68SF94soUj6cT58g3qKk1m/NVliX8DY8w5UsEFKtzNJSFm8hW6ViZJqYxTGbSEa/l12bxuYxQESKEMURAOzULJstjMhimyJnib1BdiD/EE2NbNxOm9P1+Umdr9TrIM2JuSSR4sCOjfirrGGU/mCDSQkbdbJhonHy9qssqwSKyIkwkp6VfMkyRjUrGNIm0dO1UCFj2RQRQACgCSQAUuxezWO800g4yO7L48FiR48D6/QeB+AqWx9Xq8SUz4uLjRTnldkiRWPNuT3YKD9zfc3n7j5Pmk9sH0P/P+39HXV6VnVzrmlcgAjvsAjsAmEAAR9UvaYR2Adil+ER7A1waePrT38fmOLy5Eh7LnKvzVowxW3Pe3qAr0m8gpuwlcsJFSJrULOtSASOm5h2yEGxlVUE1BTMAnKG4hI6tcP90suyVnwEP3hSQTcGygj0JI8en8apnepuyHQza/pk8ON2/IW2NJKiyRxWZA8skZ/XHGrfeFDEXBCn0qw+9cxs0cjpWocWuHPGrkBxN4c44ytWse8k7HgeptpLKy0RXX7AsdcMh5Vgo5i4rVihaw2VXk/HPjpmRRFwu5UKBia2I23y9gsWr0uLkYeljdUlMS3ksPRncD7WC35Xa3i5JrxxB8d9e6dNm97+St7pux/JWZizZGAubLxxDJIG5xY2K7ESRSSkCLhGGBbgiKbGvNznGRrEtmXKb+koO21OXv9tGqov7E5tz0IAk4+JFqubO6AFp5d01Aqx3Rv34xxN8Ia17mtE2ZKYLiH3G4+eXi5t5+v8a9j9ThzoOuYEW0KtshiRe6VjEI9zgvICIeEANwF+gFvpTW6xqsVbFkrEEbSBJBk+dO1G4ljF2kgg0btHfeJbLPW6rF0d4gCQHDuyKImETAPVsUQHlSnE8geVvFj6fxFvNYswyHdPZZBGG+4FSSRY+AeQCn08kN4v4+olFjfm/f8P4alOKwK47umHsg36IyjJ0nI9NZXBFO0Q0A5hFnVWbTq6aUHIMmUot1LsxQUXWAp+sQKG09g52zi1cuNDGJdUsivJyTkFYgqPX9PqfIt59T6VqbtPVekZ/fMDdbHMf
X99kxJ8fE9rIMEk8Kusjg8P8AdAKqeL8rLcBfWmeyhbsA3CwWaTwJUbdVqmtbXnu+0sdxaWUsbXEI1k29iuEk40zs0snNpOFhcGkXRAQUIlsYxBVNhbSCPGyCiQSwcrMquQTwYC3i1/Jub3PjxVl6Dt83c6hMqfaYG1MQeCaXGQqhyYpGElm5cbKvFSojX7gWBs1hYvQPKavtj4VUrmtkjM+NeP8ASMg5OhKLU2GYkbDBN5ivzjzwzO+NZmMjJcgRLsjV4o2TOgQXKbQygKAkYpxmcfqmRJpk3WRNHjwySBVElxcHxyuAfHgn08gVrfb/AOQeqwvkrJ+NNPrczbbPEwXnlbEMblJEFzAUZk+8XQMQx4lwpHIEVrnPkzeZC6iLBO1Lj5OXTGcO1nbBXL7XrNQXVcvtUZOi+Hs9VWa2tdaWYTMW2K/bkHtFApttjh0j8HpvYyjPFjs+MASGBWzL+K/d5BHkV3R/5J/DKTw4uw28WNvJGSOSCSOcSQSsPMUoMQCsjko34N6+PNVo1yrBOzC0JIzsLTXCLd6r4u4HfxsYVwwbLOjx7pygxerM3rkEe6RKdLY65ykMJerqCtRQ834MwQ2P6vA8fT0Pr9K3Zm7D9rjDLgikyUJXxFZmsxA5AFgCBe7WNwtyAanV5e/BvlvypzBj2f42wlkgG9ZyJCuFc6syNE4DE8jXX8TKOrRIOVHRHAyNeI9bOW7ZJNRZyoYoFKIdQlnev6TbbTLjk1ysoWQXl+iEWN/X1FwQB5Nar+YPlH4+6J13Mw+6SwyyT4bgYJJMmUsiugjUWI4ycWVmayqASfpf2Nech5pE75ceGcaYgxvMNLtynyPUmaSNun2cU/b12BrbeLiZvJlmrTttIMZGQvUoi5TbMFkiIHFRwoU4g3Eg7e7h2eTruHHh45EmzkQfcbGwFgXINwSxvYHx6n6Wr85f8cvgvF+Zey5vYNzG2L0XDyGPtRl1MkkhZkxo5FKsqwKVLOrchZFIHO48W9S5RYUuWaMi5b5f8W6zm5K+RrQUatiSxqcZIeu2doqmJ7Cyj8cQxo1YJRqmKLluCKSRzHFftULsOm4dphTZsmXt8VZg4/SjeyAf/KyC3n6j/Wv0q2HROza3rWH1/wCPN7PrGxXN5cqIbJ5IiDaNmyX5fafKsWJAHD9J8egXypfNy4vcdOMfI1rbaBQsDxGM7TWviaoFXkL5kS/W6HyZMWR1YwlnFqsT6VsTGsv2LMXThqLRNHxPUKJSGACy2o2ms0es2270eHhp2mRESP3GkYyqTb23Ytf20ABIXj5Nza9eYPm/4L7x3fu/XsGbYbDZ6zIjlbMmKY8EMD46R8HjWKJVSSUM4VX5khbciR5uK4i89OH3IS+RBcU2/HcxenuJbPkxLGTEJuMmYlxEeJBZKJiZJNZqzsKsUkUJJAVlFyF3MQokAB1Tumpja7sUm7ydRhRzYupyMl4ofc9hMhAxUYgYuPclhAE5N7G6r6WrWHyn8e970vWTr9nPshq5N1jYYypfaZpIZCoJyWTiTDHMScewCnxyIJqc8alVn0fRbpXmwUCSskPPRyuOCNlVC2+CILl3OsolJBIiykw7E3+jLqnIBRMGwAO2p7Ai67l4On7XpI/7LnZ+LkRHVhWIzccFnyI4QoDGd7/0pHZbXFgDWq86TsGJm7fq26k/vOFg5WPIuzLAHDyDxTHkmLEqIUt/VjVW5WNyRSOQoTuzsYRvG1dxP08kyn7IXYT7WFu+MnT9VAZRvOtmwqN3qkKm1KJe/wCtwImAez0BV4em5PYsPEhwNdJm9YGWPZaPIWDP1TSsvurkKt1cwBAR7nKQkg+PQWefuGNoMzKmz89MPsrYp95ZMd58HapEre02OzWaMTlzf2+MYAI/M7JphmfpFnvaNbvT6BZvawLp1crBHkbRpDvZNJYjYZcqqxPEEAo+KW6CKCJvV231n4/xXuuo9g3Meh3EuFjS6/m2dkRBIgZJQwX37kcxY+6/ENc+LXrAyPlDTds0Gok3uohzMmLP4LhY8haUhIiC3s2B4m/9JORUWsb2pmJSvQhCSy8fOTkrkKHRBVtaQnkoVtZJZN4RF6rGJSZU3TlpERggCayaxBdFDsAR1qnY6TVKuTLg5mXk93xlBXL/AHAgTKmDgSGISgM6QxW4usi+6PQE+u09dudqzY0WdiYmP0vKezYn7czvixFCyCVoSUR5pb8kZG9o+pA9JVUmpWFbG+O5uFsjuYtkUsrYgaSFjcHhpUJpcpHraWUITxLtqwbJj3BB7CK9QbiGvRvUutbyXomj2urz5MrsmM7ZPCXKYwTe+wEiTEDk6RqPsU+A9xcivO/a+x6SLvO71W1wI8XruQoxuUeMonh9hSY2hBPFHkYjmw8lLGwp75CuwQzrK9vm7g03XYWQZN3CDhyZJKPcEUXfJgwTN3TpU25ugRKJ/gDW3M7R6c7iHuGYj/3bBxJI1ZWewjYFpB7Y+1yfNiQT+HmtT4W7239pl6jiOg1WblRuysq3MikLGfcPlB6XAIX8fFaRjIydjnarY4WGhl6c+g3qrmblWqjO3MXRznK1asW7goLIsl+n7KAh2+nURh52w3u41291WLiv1aXDkLTzIUzI3JIRI1YBlja33g//AFqWy8LA0mn2Gk2uVlJ2eLLQLBE4fDkQAc2kZSVMi3+0j+FKieb2FyiwLXZJhGrJyjNaSPIMzPCOYlM4i9aNylEO5dLkHYhx7C6sO5g3k8cA0c8OPKuQjSmRC4eG95EUfR2H6W+lQGnn0sEkzbuCaeNsd1iEbhCsxH2Ox+qKfVfrWO8rKTy0xFoGUmEFIdi+YJxLd4JIV4V6O4uH7PpEF3KHoTNuHTrpytBFldhxuxHIykfGhkjEKvaCQSerSJb7nX+U38V3Yu+lxev5PXxj4rpkzRyGZkvOhj9Fje/2o3qwt5rWXDHFLvqAI2iBav1CdPdP0+ppKIdH1IIyLYU3RC7dnT1CXYR2Dt1gdo6L1TuUPtdhw455B+mQfZKv/wCTKtnA/K9vJrO6z3ftPT5jL1/MkhQ35RmzxNf6tE10J/O17geaUMHBxNbi2kNCMUI+OZIpooN0CFL2JkKmCqxwADLuDlIHWofc5x7RER1NajT63Q6+LVamFIMGFQqqot6AC7H1ZjYcmN2Y+STUNttvst5sJNptZXmzpWLMzE/Uk2UeiqLniosqjwABWot7SfXZoL1RjBK2AFfBJyMwApqRcc8IdJ66YOEkzLlcpblOUgCBTiHaA6jOz4u6lxUm63FhtuuXAST+DFE4IkeNgOXJbhgo8NaxBqT61laeLKaLsc2Wum48zHD5EsiEFEkUkKVbyCxuVvcWpIUxjD2esEq1tnYzJU7SpvubA+VQcdLWxtHLlwyE5F0m4+KZIHAoCG5fVHVZ6piazsHXx13s2ZBv9zqMvjkyFWsmSju0ZIYLZ41NgRceDVk7Tl7PQ789h65hz6LUbXF5Y0asv3YzqiyAFS32uwuQbHzThPLFHs5yKr7gXftKbQfOWXdtFlWopsCgdwLh2Qootz7D6oHEOr4NXbK3eF
i7jG0s3ufv8tJHjsjFLR+W5OBxU/gCbk+lUvG0ubk6jI3UIj/YYjxpJd1D3k8LxQnkw8eSAbfWmYyoyrDy1Qq97p027rzdgnFx1ripJ90kmJx4VMIZeHjlU1zFMCfWZwfcqYBt8OtVfIuJ1/K7Hiy9w1eXLo0gEMWZDLJYT5Dgew0MRDEeORlPhRW0vj3L32N13Kh6js8SPdvMZZcSWOO5hx0uZ1mkBUEXsIxYsfNP8mkRBNNBMOlNBMiKZd+rYiRQTIHUO4m2KUO0e3W6Y41hjWFBZEUKB+AHgefr/GtNPI0zmZzd3JYn08t5Pj6etaGzWeLqTBvJS/jPDOZJjEpAxZLPlhdyKootgMigAnIiJw9Y49hQ9Oobf9g13W8NM7Z+77EmRHCPbQyNzlNluF8hb+regqZ0Og2HY8x8HW+178cEkx9xxGOEQ5NYt4LW9F9TXfBWODszd07gJJvKNmUi7iXazbvOhGRYmKV00N3qaY94iJw323Ad+wddun32n38MmTpshMiCGd4XZb2WWOwdDcDytxf6efBrp2+j2+hnjxtxA+PPLAkyBrXaOS5RxYnw1jb6/iBXFjr0Va4V/X5tFVxFySZEniKLhZqqchFSLFAjhuYiyQgomA7lENN7o9d2TUzaTbIz67IUB1VmQkAgizKQw8gehppN1seu7WHc6plTYQMSjMquASCDdWBB8E+oraNm6TRs3aIAJUGqCLZEpjCcxUUEypJFE5hExxAhADce0fh1I48EeLAmNCLQxoqqL3sqgAeT5Pgep8mo+eaTJnfJmN5pHZmNrXZiSTYeB5PoPFd+u2uqk9JNbMrOV9xFSke0rzYX3vJHOWZln0mCiQAwCPdgHS0FusG599uoNQefBv5NxhTa7Igi0sfufuonTlJLdf6ftv8AycW8t+PpU3g5Gij1GbBsMeeTcv7f7aVX4xxWb+p7ifz8l8D8DXFgtMVWlIRKTB712CXQhI7wjNZ2XxzgomT8UKQD4VvsXtUN6oabrsOu0EmJHsPd5ZuUuPFwRn/qMLjnx/Sv4sfAppuv7HeplyYHtccLGaeTm6ofbU2PG/6m/BR5NbeQF8Rg+NGJt1ZMjRyMck6OYjVV8VI4tU3KhPWI3Ov0gcQ7QLvqTzv3iYcx14RtgIn9sOSEMgU8AxHkKWtyP0FRuCMR8yFc8uuAZV9woLuI7jmVB8Fgt7D8aw4FeXWiY73iTjm1h8GmeXZRbgV2bZ0Ij1lamUMKwt9ttjG9OsTTz7KTWwDeiCPe+0DPHE3KNXPrwv54/gT/AOtZe4i1sexn/shnfSiQiF5V4uyfQtb7eX42/wClbfUpUXSKtqS6b2qSbCmo2p+0nE24PDroNl6wwepGTfTSB1u1QE0wApky+sbfVT7Kk0eXrc/D1SbHNjzAvMsqtiRyC0k6k+th4Kjyf9KtfW3ikxNjg5e0bXYcmIW4BWZcqSM3jgYL6XJuGPgfWlqIAAiAdobj2/R+n/h1bDa/j0qpj0/Ouh06bsmrl67VKg0ZoKunS59+hFugQyqyp9gEelNMoiOwCOwa6cjIgxMeTLyWCY0SM7sfRVUXYn8gASa78fHmy8iPFxlL5ErqiqPVmY2UD8yTYV0QErGWZhGS0G9RkYyWBNRg9R7zuXCR1BSKoUDEIp09QCHaAD2axtVtdfu8CLbamVJ9dMvKORb8XW9ri4B+n4etZO01efpthLqtpE0GxgbjIjWurWBsbEi/kehp2Md06Zp7eeSmLS9svtedcyzBN22QblgmSxSppQ7UyJjGXbI9G5Tn2N29oajtHqtnrHy22OfLnLkZTSRh1VRBG3pClvVV+hPnzWdu9prdkmIuvwYsIwYyxyFGZvfkX1ma/ozeLgeKcbVgqCo0pRpSjSlYcj/u99/sbn+JPpSoX+W1/YN4pfiZqX8lNpSmC8jv9U7wg/FEf+l1n0pVnkjV61MOAdy1dgpR0BASBzIxMe9cAkUREqYLOW6qgEKI9gb7BpSohcouGrjkhIYbdQ+fMoYIjsI3mJyZVYDFcFitWIdXyA9pEg56VRu1CtSihY9tLuUvComSbKgp1HIYwAOlK+su8Mvj1scZ8aGfsy2HErSw4/t8pglE9FiqJY7LjeTi7BAuZmVjac3vB4hxZ4VtIu41KUSZLuEgASAn6mlKzcqcPEMpvK7ZneccwV/JGPMy2nMeIMixLmnOpfFilwqDmizWP4KHmqnJVmYx6vW5B0iDOVZvXAGcmP3+4F2Up2OO+A6rxuxshjiqS1isZV7JbbtZrXbnjZ9Zrfd75YH9pt9omlmDOOjUnkxNySqvdNWyDZEnSRNMpShpSmNixEfMpvACIiAcH8YbBv2BvnfLO+wfBvpSpy6Uo0pRpSvhRRNEhlVTlImQomMYwgAAABuIiI/S0pUKuVnILIGJMdOLnjjEVmzaLKyV9hL06luCN7OFYkn6TSXsEc2O3cLSqsMkcFPBpFAy2/aYpQEQjdtm5GvxP3OPA+QwcAon6uJPlgPN7fhV26B1fT9w7CNHuttjaaB8eVo8icExGZFJjidrgRiQ+OZNh+BJApuctUnGXMPDeTMFuL3IM4uxtI+rZFSx7ZYot4pzlcWUy4qk6REZIkHKuEE+4eM3SXWdE6hRLsO+uvOx8Le4E2uaQhGAV+DDmh8HifWxt4IP0rK6vuOx/FHbdd3RMKN8rHZpsX91C/7ecDlGJoyeHuICeUciGwYKaqf5nUXiDh/EkXaGKNy5SUeyZJwpiCm8a6VmYE6bXZ/CLVVBRPHsLW2sqia2MGhwPMRB+x8quAKgQB1S9/jaLBwFmQSZmO8sUSQJL9itF/4Kt/uA/Uv8xPm1enfiLdfKvau0y62Y4nWtzj6/YZ2RtsjAvPLFsWBBypJSh9lm8QTj/bC3W9qrZe5xybmTzP2h08U3fiw5z5T5rDyjaWsEpi64OsUWypJ1GmWmSb2ZYYaMk6j7KbroJQaaJXazTu0d1dxGpNsszYdyH9CTCbKjMVixjf22XijHl4BQgEcPUiw816Fi6Z1zqX+Nzq2zw+zR6PLjzrxxJmQDNgn9+eFDCPcdJ+bKxySxRX5P9tKbDfll515Y8g820LNXIVNVlxjiITClqyTHV6wTEjfZ72O8TimiEdcnMaMy3h4wR7yYOZTvTopiTc4gprtwOobHd7TJxthlfZhqIWkCsS5sbeGtew9W/IfXzUd23/IrpXxh0TTbvqGhIm7JLJsIcRpYo1xovcXmxfHD+2ZH9IABYMwPgFav74wcBaVxce4okaplHKkoXGOJJbF61Pczx22MbTIWGfe2aeyHIUsTu0mttdyr9TuTEXMRuhsmXcA1szT9Zx9M0DwTTN7MBj4E/wBNixLFynn7rnx58DwK8N/I/wA47j5Ji2kG01usj/uW0TME4ivmQrFGsUeKuR9paBUUXBUFmuxpcZe4E8P883yMyXlnA1Ht1wjk3yS0k4YGYEnvH/vqtqbxSjEtkdIm9ZJZ0J1kzD9VtsGsnO6zotllLmZuNG84v5tblf8A8rfqP5nzUN1X5u+V+k6STr3V93mYmpkKkIG5GLj6C
EuG9pSPBVLKR9Krn5x+U9xQhOIeaHuEccVHHmS4h4hkeBu9ktpYtNquykUPaNXdWm0u1WUPUXcS5WSQZ7kDvyopkOAj21XsXSdJHosg66KOLLU+4HZrfXyvJjYLa9h+NrVv74Y/yf8Ak/M+VdPD3DYZWf12VDiSY8MHMkMp4zLDCA0k6uFZpPP282IsK8apjnP0AodQ4Jl7sgHOJwTJvv0EAREClAwiOwbBuOtB+frX62hVW/EAEm5t4ua+dh232Hb0AO3YI/CAD9EPh1zXP5fWuNKUaUo0pQVMDHASkAVDbEAQKAnHcewoCAdQ7j8GuPr+dC1l8n7R5rn0dg+nXNPWuNKUaUo0pRpSjSlPjgDKFExBdXtyveG4DOCKNdkmNcqNsnpaGqbSxvDIkbTNmYw5AcWqGbtAWSVjDKtyL96Bu9KJA1IazMxsHIORk465I4EKrEhQx9CwH6h6/bcXv6+Kpveet7vtemTUaTbT6Z2yEaWeCNJJmiW944mk8QyFuJEwDFbW4kGphBkDEudMb0Ss4V4rEZ5h+OmVu2asK46d36x17J9VLAOUazJUOtNHruws4KlkFz4qM8SYjJZQqxVBTcqAnPDKwtjixxa/Ctn/ALgvLEhdlkSx4lFvyCp5ut/tPkEgm2p20Xaemdgztj3Ds5fqZ06Y+v2GUuNDLhzGUGZMmZlWJpcj7OE3AGRQUKho15RLvvMaax5wzyNw4xZKZyqFxZ5vQybdn8pI17DBcf1RjHTFKfYxjkoSwjeMtx9mkrQKrhOSErhsigQDJHSTOJe07qWHRPqcczLP73NzcR8RYqY/BLPct5B8i3p4NR8XxnhbP5Xg+QdxHrZ9S2uONjqBJm/uJGZJ1yizxiDG4LF9hjurljZgWAqBud+EvLDjHCwtlzvgi+Y3q9jbQbmCtMxHt3VWki2Rm8kINs2skQ6kYQ0k/YMFlitBXB0VJMTGTKGw6iM7TbXWxrLnQSRxMBZiPtNxcfcLi5AJte9bM6p8n9A7tky4PVdri5mdCzh4kYiVfbZVcmNwr8VZlHLjxuQASai0Gw7b77D6dg7dvh2AezfbUX61ezc+R6UCAAYekREvwbgADt8G4AIhv/h1zQA+p9aby/VJ/ciRUWks1YRyTk7t9KgUTzDQ6ROlBOK7AKkLgTCCh+r6kNttWTru6x9G02W4eTJKBUj/APaYE+TJ+NvoLev1rSHzT8abj5Ui1vXceTGw9FHkNNkZlic2BkW0a4foF9y5Erlv0i1jepveXbwqvvK/JtXxQKsZT8dVV0yks0Zf76NiKZi3FpJVBm9uNhmJ91GwUfMSJVvDsGizjvH0gqRMgG9fpyWxI+0b9psNpVw34vK8hH9IH9Q5EgFQfCXtc2FqgIexZfwT8RQ6/sUODJ2TFEuNr8XCV757ryMBEKIXEjr/AFMniGCDkxcEgV+ibcbH5fGE8N0PgPZXVEzWzrWF3bbGnGuUdQmTchZBquPao9fJJR7RfvmaVulYlNc8essqxWWOofwpgAo7bonk0GFiJoZeEyrD9kJs7uqqT4+gYi9vI/I1+ZmtwvlzsvY8r5WwVytZJPsgcjYqHxseCWeVVPIixMSOVEgAcAAe55Pny+Zx86zmKji+TheKVWp/EbAmFrT7h1GiU2vN3+Vse16DdMWlVr2Vi2N87YVhhJnFVBAjJsUHqyayIAfuTmHWWd3TcftSmqVMTAhbiqKPvQC3EPyJsD6eB58j6V7j6t/jR8ctvUyO/T5PYO17KD35Z5pCuLkSOpMsmL7aq0jL4Zi7HgpVjbkBXnJmrBPX21ztkuE06kZy2WBzN2WxSrhwumpKzsiZ1Iy0mZADnVFZwuY49HaAB6pR2ANa7eSSeZpZmu7tck/ifUmvZONiYmp18WFrogmNjwhI41AH2otlRb+lgLC/j8TXrV/5YGmJsM5ctZaFj3aVbisUYqpz6URlSWWMl7O+sBph3IsphkizjGxJNJmou2YCkZw3b7FOoYxFBNtf4zhC52W6AiMRIpN7gkm/qPHm17eoHivz+/zi2Rl6v1+DKdTmPn5UyqU9tkiWPiFZGLMeJYBnBCs3kLYrahDzVuSeU+TfODNtkyjYa1PK0G32HFFLRpZlFKjB0akWCWYw0TBrOE03boO8UVXdrK7nUfLLD2F6QCi9p2WVs93NLlMrGNii8fQKpNgP/Un8ya9YfAXS9F0n4x1mHooZoly8dMqYzW9155o0Ls9vA9AqgeAiqPW5qurVerdFKFtGMlq65kU3z00/7baxzWERjlVEHkOeOkHklIjIkW9VVk5bIJi37s3WRYT9QdGw/QUGMsCedwLW+ljc3/6VHyTyLmiFkX9p7TMXLC4fkoVeJHoQWPK/grbzfxYh5V9tpXH7lDjrmHl15YmGL8C2Z0o0YVOACy2jI+UZyoWRtS8VV6KK/YnbvbIQzhVV+p1t2SSG6gCJyANi6vLDr9nHt8vkMSBvRRyLuVPFFFx5Pnz6AVpv551+y7d0fL+OuvLC++2sIu0sntRY+MksZmypGKtdY/tAjFmcnx+k17COLPmu3fl3yUxLijFfGLLMpXjTFtfZ6vl9oCtPQ43RhGbw0BWH0maOdx0nYhVjxBcyblqLjvSlRKqKaoFtXW9n3Db7/HOxOPPDFmTuT+0AaDHYWhiEhsUlWx5uv67gebV4M+RPhPqXx70LZbjM2sEGa+Jjphww5hlGynDD3pxFyBMJDfYrBgliW43W8sXfKuJ4+2rmxdMj4xyvjnj7xzeQ9guV8sNJUbxd7krC1YJpyWDk0XRD3qOK7eJJPziZM6avoAADt79Lr9r07t+4zIEC9GyJDkTck4vHksAB7ABtKknrIzWIPgAfWkZ3WoPkPqnUtVgZuJl/I+bG2PjwxT8+EEbMSufdb48igH2lAIZfxJ8Z2d+QrCyzfGOmNhnQpnLlhHPsVNGsbJRbyeF7VY+6GTvztdm6Y1Juyi36KgN3BiqKnE6YbmIIhWvlrC7v2rJxNJrow/TM0J9kc3tSzFlDN7/JWAiiNm4jywv6kC0p8TaLQ9bwt72DOaNe3ddeQZDuoljhCytCP2oV1aWSRgy8xdV8HwD5e3H8YF+u76MuNtxTOO8QFZw8riSmv4qdf0SRctAVr6lqSRdOJOEdrMyeIbJOukVSB9SJQ31I9c6P2bd7rHzfkBtSdZrI/bh12OgkWJlFoneQ3YFQOSgk39CKq/aO0anrvX3XpUG6iydy5kbZ5XOL9ynL+skK2COOR4uyjwfQ3NSMZ0+vx1jc2tkyFtMO4hvBqmRWOkyCNaqAsigjHpiRogYpw36ilAR1tjF6vpcLeydixIfb2UmMuOeLERiJDdVWIfYtj9QL1qDK7Nus3Rx9ey5fc1keS045KC5lcWZmkP3sCPoSQK2MhOREY8i42RkEGj6eVXaxDZQwgs/XRRFVZNuAFEBUTS7e3b6Ws3N2+r1+Vj6/OnSPLzGZIUJ+6RlW7BfzA81h4Wo2ewxcjPwYHlxMNVeZgLrGrNxUt+RPimrqk7EVSoRZoFO+3lhK3J7Di5kWy7mZj13b1RN06elcppHS
go5ZEQKfbYCCGtddb3Gt631jHbTDc7fCyNrJDykVnnjZ3Id5AwBXHiIsDawUithdj1Gx7F2bIXcNp9TmY+rSbhGyrBIqICiIVJByJAbkXvcG9PWIbCIbgOwiG4egdvhD6WtskWNq1UDcXrjXFc1yACPoAR/aAR/6tcgEmwrgkD1rQQtngLEvMNoSTQkV6/JKQ80miVUox8kkAio0WFVMgCoUCj2l3L9PULquwabeS5UGpyEnmwpzBOFv/TlHqjXA8/wuKmdroNxpIsWfbY7wQ5sAmgJt/UiPo4sT4/jY12FkJcbEpGDBKFgyxRHpLF4tLuzvxWEh4zwW3flFNIOvvBHp219DN2Z3ja84jDUDHDjJ5rYycrGL2/1Ahfu5elq+WwtYNIuwGWDtjkFDjcDcR8biX3P0m5+3j61ntWUe1FwsxaMW5nqwuHazNBukZ44HsFw5VQKAulhAe05hE309ZuPiYWMXlxIoY2mbk7Iqrzb6sxUfc34sST+dYWRlZuQEizJJpFhXigdmIRf/ABUMfsX/APCABXVMOX7GJkXsZHGl5BqzXWYxhFStxkXaafUiyK5UKJEDLm2DqHsDXxs58zE1s+XgQHJzo4maOK4X3HAuqcj4Ut6XPpXZrIMPK2UGLnzjGwpJVWSUgt7aE2Z+I8txHmw9a0oW1gxa1ULQKFcm7WZu0ZwjlYHKvtlREqq8WksimKa6rcR26/VKP+HUSOyYeHja4dh4YG22JVEx3bk3vlbtCrAWYr6cvANSrdcy8vI2J6+GztVrgzvOo4j2Q3FZipN1DetvJH4UqtWOq7R+z9r6YfTDSlJqbfNKbASUtHV5y/I1P4xSGrUej7QkHDlZJJVVBqiVMqzgRP1qGH1hKURER21X9tl4vVtLkbLCwZJljPMwYsa+5IzMASqKByc3ux9SASan9ViZXaNzj67MzY4WccBPlSH241VSQGZieKi3FR6XIApQNlRct26/dKI9+giv3CxeldHvkyqCksUPqVUurpMHwGAdTmPIZ4Em4spdFbiR9y3ANm/Ai9j+dQk8YgmeHkrBHZeQP2tYkXU/UG1x+INduuyuujSlGlKP8X+L9nbpSgRAA3EQAA7RER6QAA7R3EdgANDYC5pYnwPWk00qkC1tMndmyKozs1GNIl668Wqq1VYMjJmbESa9Rm6ZyGTDc5e0f8OoDG63p4Ow5Ha8dW/vGVjpDI/MlDHGRxAS/EEWH3DyfrU9kdi28+gg6rkMv9oxch5kXgA4kkvyJa3Ig3P2nwKSuSHOUI4YOUxyyjJtBiuueyVx6ZJF3Ltjd14dOOeKhsgonsffpEBERAdhABDVc77kfIWF+z2HRosfLhhdjlY0lledDx4iN2/SR9w8fWxNwLGw9Gg6Bm/u9f3eXIxZpkUY2THdkhccuRkQfqB+31HoD6E3DisFnDhiycO2pmLtdo2WdMjKFVMzcqokUXaiqUAKqLdUwk6g7DdO+rziSzz4kU+VGYcl41Z4yb8GKgsl/rxN1v8AW16pGZFBBlywYsglxkkZUktx5qCQr29RyFmt6i9q1y0bJKWFjLJzrpGKbRzlo6rxUETNHrtY4GRkVHA/ZyLNi+qUoeqIen4d8GXAz33cOyTMkTXRwOjYwVSkjsbrIX/UCvoAPFZsWdgppZddJiRtsXnR0yeTB40UWaIL+khz5JPn/wBK3KhCKkOmqQiiShTJqJnKBk1Ezh0mTOQwCUxDFHYQHsENSrokilJAGjYEEEXBB9QQfBB+v41GIzxsHQkOpBBBsQR6EEeQR9CKXdBgE2+zhFs3aRrVPw7Rog3SRbh29WyKBEwSTSSHtDpAvra+IIIMWBcbFRIsdBZVRQqqPwCgAAfkABX3PkT5UzZGU7yZDm7MzFmY/ixNyT+ZNOvrtrqo0pRpSjSlGlKw5H/d77/Y3P8AEn0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP8A0us+lKtZ0pRpSjSlGlKNKVBmK/WVXn+4/i/+vfLOlKnNpSjSlcCO2lKgnmfKGW8gzOZ8KYZiZTF9+x3H0l9WcuZLqKktie0ubL4aRkUIJNm6M5nFYeNKq3cFEhe4cHAe0A1ET5ObktkYOArQ5UYQrLIt425eTxt62FwfzrYeo0vWdLFqOz9tlj2WgzZMhZsHEn4ZsQiBVDKWHGMOxDIbnkoP41qsk5kkcUT2DautjfIORHuW7k2oklZcfwxHdfx46JHtl3lxvCiqxVIWprOVDAQ4CYxdhDt27e3Lz2w5MeExSytO/Asg8J4H3P8Agt6wOvdSx+z4O52SbDAwIdXiHJSHJk4y5QLkCDGAFpJwALjwD6/WlpSsVY4xvMXqfodLgqpN5MsvvhkGUhmx0HVvtPdC39vTRzKqA4kO5Hp6gAobCPZuIjrIx8LEw5JZcaNUlmfk5HqzfifzqH3HZ+wdhxMLB3eZPlYWux/YxUkN1ghvf24xYWW/mxvTC5G4W8arVgq/YUDDUO3olll5zIryrU906rEnJ5CcqIzTiYibEgspJwM9Y5KMQQXct1EupMwkH1BEBjcrr+om10uv/bqMZ2aQqv2kufNw3qpYgAkVdtB8vfIes7nhdw/u0rbvHhjxVmnAmRMUAxhHiI4SxxI7MqOGsQCPIBqiXmvw75oc+Z3BkzA8QYvAM9R8KmYyE7cMiJPJlBSrWSSg69jKWsacgqyeO04Jq3l49crIioGdqlUcCOxda37DoewdmkxpIsFcaSPHsWd7t9rECMsDb0s6mwPk3Ne1vh75Y+Ivg/B3WJndql3mDmbfkscGKVjImhSSXMjiKBlUyFoJVMhX7FKoBc1bt5fdayNxj4d0SI5g5FPC3hxZ5NsqbKVlhUl60WWfkjqtRUbO4kVk5sy4NDLMwVcHcG8T3YAAEAoXnq8WXqNDHHvZeOSXP+4w+25sqBr/AHelx5v59PFeVvnXY9f+RvlnNy/ijXibTLjowGHFIRLwXnNkmEIDHx5BX4qFHDl9SamZQs14pyhZMlU7H94hrVaMO2NGo5PhYwH3i6ZZHCKy6MPLC6aN0BdKItzmAUTqk2KPran8bYYWZNLBiyK80D8ZFF7o34HwPwPpetSbvp/Z+t6/XbXe4U2LrdtjmfDkfjxyIQQDInFibAkD7gp/KnQ+mPYHp3HsDb6O/wBDWZVbqv8A8z/IFBpHAzkNJXdCGnIey05SkwsY9fETay1ssblNrX0mTpNF4kMnGPURfoFENjKM+ncBENVjuGVi4/Wsp8ji0bx8ACfBZvAsfxHqPzFbz/xw0W83PzboMbTGaHLx8sZEjqpLJBEC0pZSVPB1PtsfwkvXjapvArNth4rZn5VzcTI0mm4lbUl/EsbdDPoVfJEPaH3hZKbqz6QK2bOWEEkqgbcvX4k7gpU99h1oTH61sJdLkbqRTHBAEIDAgyBj5Kk+oHj+JNq/WvbfN3TsD5M1Hxnhyx5m22rZCu0EiyDEkhW6RzqtyGkIYWNuAQlvpUJhOcSFTExhIUTHKQRHpAxwKBzAX0AJgKG4/SDVfrcNgDf6186VzRpSjSlfaaiiRyKpHOkqmcq
iaiZhIomcg7lOQ5djFOUe0BDtAdAbG49a4ZVdSrAFSLEH0IrsVKcwAt3agJKGECqH9Yp1ClIKpQVApSmUL1AIh6QAQ39O+n51wpHlLjkPp+R9PH4fn9a6dh+gPZ2j2fB9HSvuxrjSuKNKUaUo0pXOlKlzxcytGw0ddaDDPsvNrt7QJeCo8cGbRHK1grtdgXp56CnryZdV/QqFAJopyLhVs1XMqKqgrEOmQoFn9NkmOKSCP9yJuXMGAf1GCg3Uv6og9SQDf63rUPyXpY8vPw9nltpm14ibHI2rk4cMkki+3LHjWC5GTJcxKHdAoC8SrEkuW4xhxTyTh2csgWiCi8u58w9llrx5xByHeRr2vw0pW56HPbOQTbkLHNaoHvdKRsTJNa1CvWS6L+TTOikZQ5yl1P4WJq59e0t+GZkRye1HObgEEcpfdsv3EBuCkG7elak7H2HvWs7lBgiP9x13UZmGc/N1iMrOkiSCLXnXs0/9KNniOTMkimOIqxCgE1PK14utvnAcD6BjvCecntBrfGSEWxrhLEGS5KAVluY94w/WYkj/ACcsyXfM7LSbPEM5d1EpN1EXSezxMyhyFOba0TYs3btHHj4U/tx4w4RxuRfIaNRd7XupAJWxv6jz5rRWv32u/wAeflTL3PZdWuXl7uQZGbl4yycNRDlyvxxuXExzRsyLKSGQgowCkgV5C7LWp2m2Gbqdoi3MJZK3KPoSdh3pSFdxktGuFGj5i4BMxyd82cJGKbpMYNw7BENalkjkhkaKUESKSCD9CK/RLAzcXZYUWwwXWTDnRXRh6MrAFSPyIN60mvisurNvLy8qnkp5hllSd0mKGhYOipNdheM+2pocKXXFmbZN2vGRiALN3llsCqapSkbtvsKRjALhZEvaNk6/1bY9glvCOGEDZpWH2j8h+J/Ien1IrRnzD89dL+IcMpspP3faJIw0GDER70gJsGY+VjjFiSzeTb7FY16UaNy58oLgVw95J8YY6nMMpNcQ3xljfIFMsilckrXzNyKyUiHshdI5/Hu3bJap1+S9coOlGjWObsRSalOrsVTY8G26lotRkawIJRDJwZTYtkP4Ja4JHEH8bAAWFzXizafH/wDkN8rfIul7xLkPgy7DEbIx5o/cWLT45DhYWVgre9Ivj7Q7SNJykIW5Wh/jFnfyfbHki9J8reLma6JU7a/sNwir9WM32m5TlPkXRzFY47r9eqNZpCxaoslILiDlys4XTBEhDiYohqja3O6jLkP/AHTFmSJiWDLIzFT9EAVV8fmST4r1R3fqn+RODpcVuh7zXZWwx0jheCXCjiSZR+rIeSWWf+qCo+1VVTckWq7fHmYv+XY5TOaTxDrNBvkXJ5YlMZ0FlPu6xkGkWG3SdFTcM8dRluyM2mXbxw7UWceFK4VR63ThYnfnHfrC64+X8f7QpqIo5A0pRAbMpYr4QM9z/C9vP1NeYdx17/MDoq5PyHn5eK8OAmROUEkE0cSzENO0WO0agAW5cQ1lUHgPoXJnP+W1x8jiCQxJA8nV6RAyeW5y/wBtmyYshHMrOVMVWCVDoklPS1kUkUC4+RI7Bu6K4Kg7cPzqLNzCUhQyH+OccYhxY8nhGZSzHgCSvjipJN/t82N7Em5FRGJ/mbuG7Cm/y9J+6y0wEgiT904VJfuM8yokfE++eHJePJVjCq/kkqfMHLvg75HvDRpjHhXIY6zVl6btEnXwatbvB2axv7y2YuQl8jZnk6sZYyXuyciCLeLBNokp0g3SBEoKH12Ze20nStOMfTGObMLEfqBYtby0hX8PoPH4C1YXXfj75Q/ye+SG3XyWmZrevRwK9zC8cawEgpj4ay2/3ByZpbuR+tuRsK8MmRr1MZQv1xyPYmsKzn7zY5a1TjeuxTeDgyzE48VkJJSNh2n+ixzdw9XOp3SfqFMcdgANaRyMh8rIfJkADuxY2Fhc+TYfSv1K0epxtDqsbTYZkbExYViQyMXbigCryY3LEAAXPk/WkcUiihippJnVVOYCJpkKY51DmHYpCFKAmMcxh2AA7R310+b2qTkfgpY+gF6urpfl61fBPJ2i4x5HcjsE49x3a4fF94lZex3aZhcuVKnZIpkbPx0vEUaHYv0GVvdDYSs0UHSy/QAFWWTBHqDV0h6/Dg7KPG2OTBHjuEYksQ6q6gghR6N5tY/xItXmPY/MGw7V0nL3XTtNtMzcY8mTCqRwo+LNLjzNGyPM5XlEPbLFkC3/AEqxerVuU/lPcevLe4dkfP7hXc9ZSyDlBarxsHcq3ZIe15qq1sLA9GFsKx9YWsiNUyqY8Qi9RsPSDoGxXKCIlKsCRrRtOq6/rmpuWWfKkl4gMrAyK1v6cYXlxfxcNa9rgetq0L0X5+7f8zfIZijx59VosTA9xnhljePDli9z/wCZmNIIjLjWdkOPcLy4O1yvIOFg+h85MH8FcCQfLHOrfCuRS8sMPOuImJrtkSZp1wt0L30mWfxdmKzVmNm5ewQ8tCPkjQ0M+MTw5mgJKqpgciIZGFj7rC0cEe2n9jI/dRmCNnKsw83SQgEkEH7VPpaxI9KiO0bf4v7P8pbbK6Fqzs9OdBmDbZUOOk0ML2X28nEikZER0dSJpo78gxZVaxc08ZAdZBzjl4cT4BvnLLkPxwyLIX2Ex7Vsy59i3tnzlknGKT2zRirCovV4t1jdWlzztN+gzdIGUfoR2xVjkXEhKjkNPm5f7XAkysjXycgiySgmR0uR9vjhxJuAR5t6+a9EaiPUdY69/f8AtuJodP3LCWB8iXDwGWPDx8kiNg0w5jIEyKY2dWAQyfpHG5s88ufkjU+ZEHZMW8v8tZh5TZhwHZa1yX4tt7zZ4DC9Vncx41jloqt4ix3LhKMLNa5ycnJVJm7YPWvgOpv3iZTEEDKWbr2xi3CNjbeWXKzIGE0PIiNTIgsEU35MSTYgi3itI/MHS9h8c5UO8+PMDX6Pru2hk1uzMMT5kqYeQ3KTLyECGKJI0RnWRW9z7rEg+FuW4ZVg8tzrzLnm28a+RXHDPGcuNWPbVnSqzC0JL8bEbWhOpx6UbD2eLZovbDl5iwYpKPVFTdyi2WVKRMhhP1W/UR8t7NnTY+Rj502MrSqbGHle1gwFy4A8/gL+BXnD5HzRB8V63q2v3Wn3HVtZup4sKRQ6bH2jGWLPEzFY8QsxCBfLMFJZgBawGw5QzNF8l8e4mhsAyU/g2z0Cw2W5cjE7MybRdCuEYq8JC0ZxVTomfSK86mgmYHBVClT78vYYAMIT8mTmLso8VICcJkJaW/hWF7Lx+t/x/OtSYmj63P0vL32TtVi7RBlxxw6/2yWnhbjzmEt7Lwuftsb8Tci4vIBVq2XVQWWbN1lmhzHaqqopqKtjnKJDnbqHKJ0TGIOwiUQEQ1lSY2PNIkssaNLGSUJUFkJFiVJF1JHgkH08VVkyMiKN4Y3dYpAA4BIDAG4DAGzAHyAb+fNd4dno2Df07dgdvaPYH0ddw8eniuk+fXzTd5ceZQjsX319hKFq1ky80rMgvjiAvEg5iafL2whS+zGNkkmaiTplErGE3eKJmKYNg7Q9OsfLbKXFkb
CVGywp4BjZS30BI+lTHXo9HNvcSLs8s8PXmnUZEkChpki/maNSCC4+gII/KtxQnF1d0WmOskx0HD5Fc1aBcXyIrLtaQrcXcVoxsexx8A+cCLh7DM5YyqbZU4idREpREdx19wGYwIckKMgqOQXyA1vIH4i/pWNtl1ke0yU0ryyaZZ5BA8oCyNCGPttIo8K5SxYDwDekDmgzh5Bt0YO1xsJOVqWi7S7jVZwkS6lIxqqZMsadQFkhRSk11CpkMrsiY3YPp1q75VM+VqI4tRsoMTbYGTDlvEZxC80SG3tE8hYTMQoL/wBMtYE+lbE+LhBi7d5dtrp8rU5+PLiJKIDMkUrrcygcTyMSgsQn3geRTlRS8W1bsXSzGLrslZBReuI4pmSK7uUXQK4XRMsh3YSjtIphAVA6hMAb+jV7102uxoIciaHHwc/P4yNEDGGeZl5MpZbe663N2FyR5qi7CHYZE82PFNkZuDg3RZLSMqRK3FWCtcxIfB4mwBNvWk45pTiUrwQs/c5x71Tisp7VYuEIh2szUWOZGDUO2DoUYlROKQ7D1nD6YBqDyOqTbDS/2ndbXLlvmGX3kZYXaMtdcclPBjAJU/VhbzU5B2qHX7o7XTavEiAwxF7Tq0yK4UBsgBvIkuA34D+FbSERqVJTgaFHOyMlFWz1WDinTpw6fOmyCp3D1Uiy/WqqVE6wiPUYNg7A7A1n6mHrfUo8LpmDIIXaOQ48LuzyMqsWchmuTxLebn+FYG2l7H2t8vuOdGZYxIizzIipGjsAqAhbAcgv0H5mtCVEAy+q56Ll1DSkyCYTl9w9vGiHQmT6v3h+Efg6NQ6Qj/8AWc05/ul/7SBe/wD+jv1nwB6/uf8A+mpdpifjVIb6u391Jtb/APSH6PUn0/bfh/8AipQLSrR1cWtad1l+6FnGe32dkXj0FYZo4BYG4s2r1TqUQlhKPUHSAD0/Dqam2WNkdoj0OTr5pDFj/uUymjVoEblx4JIblZiPPjza/moaLXZOP1mTe42fDGJcj9u+KsjLO68eXNkHhofp5v5t4rWg+vcKMO1exbW3KStmeN38jEiSKb1yuKCB2Lty3cCY71ZsT1D9HaYdYAzO46o4uPl48ezfJ2DrJLDaFcXFPmN3VvLlR9rcfU1nnE6htBlZGLPJrFxsBGjimvM2TkjxIiMvhFY+Vv6Cl/q61TKbewPcir3avQ9aYtY6qtyJy1jsr4iTpKQQBUya1bZIAUzhm/MQoKFX+p2NsIhtqh7rL7xN23B1ehhjg66gE2VlSAOJVvZsVF/UklvuD+nm16vWmxekw9UzdnvZpJ+wuTDjYsZKGNrAjKdv0vGD9pj9fF/NKW1VwlpjUo08tMQwJSTGRB3CO/BvTixVFUGh1ek3U0ciOypf8oNT/YtEnYcBcBsnKxVWeOXnA/Bz7bX4FrG6N6MPqKgevbx+v5zZy42NlFoJIuE6c0HuLbmFuLOvqp+hrqtDuQZexXjaeg6/HpzLck0pOFL0P2CwGTLHMFzqJkbv11dugRHt9AfQHq7DlZuH+0ycfNw8LDXKUTmceJI2uPajYkBZGNuJPr9K7Ov42Flfu8afDy83MOKxgEBN45Ab+5IoBLRqL8h9KVI/+f07+n4NWKq/XGlKNKUi1nErNztlqEtV3TepHgk00bMV+QhJdWST8O/jUEE9nLRVqkqb7Jv8G4duqpNPsttuM/rGy10idbbDAXK9wATGUcZIlUfehQE/df8AMVaYoNdqtRgdl12wjfsYzCTi+2SYREeUcrMftdWIH2/nY1voGDjq1DRsBEJqoxkS1IzZJLLqOVSIEExilUcLCZVU25h7TCI6mNNqMHQarH0utVlwMaMJGGYuwUel2a5Y+fUmojcbbN320n3OyZWzsmQu5VQoLH1sosFH5Ctt6P8AHvqT9KjaBEAAREQAA7RER2AAD0iIj2AAaEgC58CgBJsPWuCmKYAMUxTFMG5TFMBimD6JTFEQENcBlYclIKn6jyK5KspKsCGHqD4Nb2GgncwqAJgKbcBOVRzsByJnKTqKQ5QMBw6/g7Nc1xTxRMeEWxSadZVBTAepQqYE6tx3DfbcTdIdm49o6UrZaUo0pRpSjSlGlKw5H/d77/Y3P8SfSlQv8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/AK98s6Uqc2lKNKV8nECkMYRAoFKJhMb0F2AR3ER9ABpSqwGd05bZNybKEGiwGDqFjjOD2Il3tyWSuT3kDh4sU48LZaEeMK1CkPFJUEw6HHem6B+q7BKaDjn3eVlECJcbFiyCCX+4zxW8Mlv0G/438VtPL1fxfodEkr5s+632x04kRccGBdZnFxeLJ58v3KhL+U4i/wBPQhzMVY/yTSLJmGXvGZ53KMLfryax4/rUrCsohriSrC3USCjRDpm4WVmmBVTgp36wJn3L9T6dZmFi5eNLO+TkNNHLJyRSABEv/gLeo+tzVa7Nvev7nX6nF02oh1uZg4XtZUqSM5zpr3/cuGAEbW8cVuPzotclndvmPF0bTKxRJHBb+LtKmX7NMzLtpeoCYboANRb1GHSMDSTZv3HY7MoBhTL6Ntu3md9kM+FMdIjrire6xNnU/wAoUfUH6/h5prMbpUnU9lkbfJzY+5xywjBhjjVsaWMn+uZ5D9yMo/QB6mnl1n1UaNKU2uUsQ4szfX2lOyzTIC/12MsEJbWkDOlO4bM7HX3BnUFNFRQXRWI7j3AidIw+qI77gIaxMzBw9jEIM6NZYlcMFb0DL6H/AE9RVh612rsvTs9tt1fMnwc+THkgaSM8S0UotJHcgjiw8MPX+FKqEp9SrsjOyldq9ar8rbH6claZSGhIyJf2SUTIZJOSsL5g2buZl8mmcSgs5MooACPb2jrtjx4InaSJEWRzdiAAWP4sR6n8zeozL2u0z8eHG2GTkT42KhSFJJHdYk9eESsSI1P/AIoAL/Sq5cU8t7ahnLI2LeSA2WFjc05JyjXeKdN+KmXgpeOxxiiBeLXmQuVmbk8G4bzaK5FYh6JzC56DFIIDtqrYW8nXYy4e25qmRLIuMntkERxA8yzD1v6qfrW/+z/Furfpmv7J8fft5snUa7Dl3U/72ORGy82RRjLBCfuBjIKzx2HC4JFV/wCccsceIThvS6JXcfXd55Z2Wy2Gpo3Jt7WX5KVHNaGTHknJSDZhkRJ9HwtNj1GThQ0i72U7hUybce0CjWNjnaqPQx48UUh6jPdeYv8AuFm9y5NpLgILH7j5/Ct5dN6v33M+WszdbDOw0/yJ1ftTnHPAamfXnEVEUtilWkyG5KBEnjkoMn405XKSm5DoHCPH9Tg+Vj3kjRrlmiMhq/NWeGph8Tz+J3EKyj6tiHK1qZzCbCGokQrDd6Mq2Hxqj8QIVJM2wly9zj5WN16KCPNOXjSZAClgnttEQAsUjX8ILX5Dzf6Cq98a7fQ7z5iztnmdYTr26xNQ8kscMmR+9izRIzzZ2FC0ZZ8lw/EQv/TEXksw9fI9LVoV52URpjedsteJIS6MNMpwblEko3h0T
k3fac5hitnVXYC/j91UxOfYqwk36dw1GKZdpFlazZQSxYyWT3OXH3ltcutvKi49Pzq9TjB6DsNB3npOzwc7d5XPJOGIjL/bpOXGPFyFkuszcX+02Hlb+tR143S2CrhkLHGPeB6+F3mC+JU/bIPMqUpUbNJ2uIlr3HvF4R1hLIEygu3dDMyke6PLukHJklSh077bF1E6l9bPlRYvWjjnXYLsJbqxYFwbe058eSDyIPmr98g4vddTodhvvmtduvde0QQSYBSeFIXTGZRINhixkEe2jIIEZOS+v4mp2WSkKtMnts3PMq2aBiK5j2ZqrigSUwwY4mcvX70HrW6WRF0RNUJiOVAqJVhXIQER29PpskuOVzBsGmdY0iK8CQI7k35tf6j0/hWldfuEl623TotZjz5eRnxzDJRGbNCqvE48RW49thduPEnl5qjjkLhznawrGGKRlfFFH5mU2YzPO5pylkxnPPX6eMI+ZmVI1WvY0CRlqwnSoOCx+4B2zkl/Eot3Qn2ASJ7jrvaYHY1hgx82GLYQHIaWSTlf2wTbjHcrwATyGNwDf6CvZnQu2fCk+x2+56ztM3qO1i1Eevw8NolX940cfMS5nFJv3EkmUOEkK8GZOP8zWqLHO/y1cScYvYnJnF8/XpPis6cY199oOYsh71ll07980X8+thuQ7gkBKhMRDYPEKOHJy7CsUOkggbUL2XqWDpuO3w3Q6W8fMFucp+4FjEfQ3Hrc/jWzPhP/IbtXyN73x32SDIj+TFGX+3kji/bYSr+3KxjPW/uJ7ch+1VQfyHyfFdOYcR37I1zpeJ+EkhXq5xP5bDGcjMcUPOBqhHM8g5YQFaUtFQoTSeZrzTIke5aIEfRZAbtmxSk2OZA3UbjPwcrLyI8HrxRNJm2nRJuIDyerKl/Pggcl8AW/A1z1TtOj6/qMvs/wAxR5GR8odW56rLydcJ2bFwjZIZ8lomEbcgWMUx5O12uA4sK2bTluDio/JXG+6caMESeUafe7S4hMs0euScdYAu9ZdtkZmEkIwLGwq76rypoRym5XBAibdIN2zYANvqpT50aLLqcjDxjmRyNaRFIbmpsQRyClTY3NrW9BXobW9WzMmbX/IGn7Fu4utZeFAJMLIlR4v28ykxyK/tNMsye4hReRLMbSSG1PbLYSgc2zg5P5N+YfhJ8+aYmrd6LBVGJl7a+YwdTjEDQ+JnjOPiafSqtMoMXBouPYpOxBdwbpLuG59SL66LYyfvNztMcsIVeyguQFHiIgBEU2+1Rfyf+tU3G7jm9Owf+N/HPQtwkDbSbG9yeRIFaSdzzzVZ3nyJkLATSyNGOKi5/Cms5uYJo8Axx/nnGFohabgzPUCN1484Ll2lrRulNpxnS0TZmskY6E1WI1Ra3RDt2JRmFlnHiQMAbB2YPYtbjRJFssN1j1uSvOCEhuaLezX/AFKPvBP6iTerN8N923OdNn9I7HizZfdNJN+32myjaE4+RPxDwsljHM4EDol/YULwsTVcGqpXoGpe8MstZLx3kmdp+PMbK5pa5spc3jG34gSiVppW5w0ogK7NVm2RIoZjIwMyi3dkebF7lNNQomKRQw6ndBnZmLmNj4kP7hciMxvFa/MH0t+BU2N/pWq/l3q/Xd912Da77YDTyafMjzIM4uIxBIhswYm3JJYyyGPzyJBAJAFMxlOZzjB2x4hcrPeoXL+Mhk8all55c0m/prOtM3tYGksHj4HXcNopm5csASIoKaDVQSJbenWJPkbDHzL5TSjNhJjuxvxC/bxB/BRcWv4HgVYNVqun7nr5i0cOE3WNmq5XGJQqztMRL+4Ki3mRgknIryZhdqiFWKgSmZBxlf8APOE8iVPEVlylFjbUUYK6VHHcxi965jVJyJp8iLBxMvVSRoOVwBu8d+IbFTKUph3MOZDGIposnOhkXDeQcvDKhQ2uFNrnxc+Cbiq9ssx83V5+j6tssOfsWNhOI7vDLkR5KBgjTLyCKOfFTyRQrXuR4A9svl/KcQuLXIPJ81h7EuRsZcbs40/C9ax1zEyvmMlhxjm+UbMo4uO8Y0uAl45rL1u491PuGx2Ky53CKjA6SqLcCkKO6NAdRq8+R8OKSLXTpGEneS6SEW4IoIBDeSLXuCDcCvzK+Wx8h966nhY3Ys/Dze56vJzJMjU4uH7eThqWb9xkzOjskkV41bmFCsJAyvJ5Imt5dOe75n6V5kzlpzXW8v1Cr8oLNUsTsoap2KoSGNqbFtBTLR5ttZKpVl5V6zVTIp4luaRRMYTj4kQMBCzPXs+fPfMeWZZYkyWVAFKlFH8p5Kt7fj938a1n8xdT1fUoOuYuDrJtdsJ9HFLlF5Y5VyJWP+8hjmlCg+RxYRG3Ee2LEmyzVkrStGlKNKUaUo0pRpSjSlH7fZ9MdgAPpiIiAAGlKoG5x+XnyI51tcvYwjcBcQsG1l5ySC1RWaJL2hMZByzRHdMk4Y2WnTeqOgMyy5Gv1iNkySwpIHaOFAENi9RqJuuv7DeCXFSDEgjOTyEh8s6lbe59p/3B4H3WFj/19Y/GHy71D4tk127yNr2DaZa6b2mw14rBizLMr/tAZl84rC7ExciGUG92sPGFmflLzOrtWvPDLM+Xch2Wo0vISEZaqDe5mRsTFKVxkuaChImPPPkPMQtcjvBiZJu0WbouEzJmMQQKTWm8zabiKJ9PmSyNCklmRiSLp4AF/IHj0Bsa/SfrXRPjbNzsX5I63r8ODY5OGWingRY2KZI5uze39jyNyF2cMym4v5NXQ+Q68oVJrvKfzBr3U8pZBtXE6DhKLXca4tjhuclN1nNLl73jSr1iadrO0HtQcV8fCEQdpIptXKxz9pAMNy6KYIUyt/OkskuKoUIg5EiQnwoJ/lt48gWJrzV/lWm22eZoviTV5GDh4G+leeTJyWMSpJhhTeSRBYiYSffdGYuqgetq9PnKDk7niPqddgsP8PuQmUILMuIoSWVyNQ7FVqdZMSOshLIxXs92ysAuXTC7UqIkgkzgCaiSSifQI7FOcuy9ls85YlTDxMiVJoQeakKyc/FiD5DKPu/D/vXiDo3SOqzbCbK7F2LUYOVrdi6ftp45Zo8sY4LcgY7BoZnX2x5DEG48kA5D6n8TPLvrafKLLc3ZpXIUlVMWceb/AMkbIhZ71k7JaJnzCJpja2xtb8bGKO1ZFmgVd21YI9JUCiqoIF3H6aLVdei/ueWzGcqkTSm7O/oF5Bbgm4FyAPQXNdcOf375hzj0jr0OPHqlnyc+DXx+1BjY9gzymJpOLcQrMVRpG8seKi9SxfU7J7vONYyCwy0dlhqPx1MV+cwn7sR6ydiusjIkew9+G2qCEqwPER3+jgyIHdKfVG7RHUo0OSc5chZbYYjIMfEfcxPh+XqLDxb0qgxbLRxdXn1EuvDdjfMSRM33WBjhVeLwe1+lg7fdz9RTsixZqOivxZNTv0UToJyAtEDv0WyhgMo3TeCmLlNucwAJiAYCCPpDWXxHLlYcgPW1QHuyBPaDMIib8bnjcfXj6E/nUduUZ0IvHkRcEONIcp7NS77TpeoY+ata0tOQkw6mEI1XIFfd2g
pmUW/pzF0o7MskIOASIYCCG4iEdtCEgWYY/wC6lR1Kr9twb+XF/AK+tx5+lXHoofI28mufdf2HBysWZJsgmQJIgQsMeQR/cyzMAgU/bcgmvqAwFLxHKC7clls15Uk4q74xr+P0sAScoVXEdQfRCsW5WukDFAuYELbIiwOm5WBMOsHCm5jAJQD6jwWTZPsfekKvGF9q/wBi2t9yj8fFvT8a+MvtePkdIxemLrcFJsbNkn/fKoGVKHDj2XfjcxLyBUFjbitgDcmR2pCqdRpSjSlGlKNKUaUo0pRpSjSlGljSjSlN7lmJl57Gtyh4MVjSb6FcpNm7dIq6r36kyrBJI5il7x8kApgbcBL1bhqkfJWt2e56FtNXqOZ2EuIwVVAZpPqYwD4vILrf6XvV1+OdjrdR3rWbPbcRgRZSlmYlVT6CQkX8RmzW+trVHjizSrjU5e4uLRWJSvkfRUUgzWfNyIJrAg5VEzZAQUOcQSTEDbD2BvrR/wDjt1TtPW9ptJuw6/IwkmxoljMi8Q3FzdV8k+BY2rdf+Qfausdj1ush0Gwx81ociZnWNuRXkoszeAPJuL/+lTJ16nry/RpSmSyVmNrQLbSquZvHrFsTlM82/evDIBBRSjgjYrwUkw3ETiJjAYw9GyYgOtS98+UsfpXZtV190hYZrgzyO/H9vCXChyB+JuQT9tlI9a2v0X4xye5dc2u/V5lOFGRBHGgb9xMFLcLn8BYED7rsPNdIZnrtjWsMNUZBlGTreRSgK/YLC23rFimTKkAGcW7aHUUfqFSMIgmPSYOoB22Ht6h8q6PeS52q6zPFj7lJxjY2Tkqf2uTOSLJE6EmQ8fPE2IuDa1dh+Lt3pIsLZ9kglyNQ8ByMnHxn/wDlY0Fjd5UcARi/q3kGxF708bhd6ziV3QNDSUi1jzLCyaCCYv3yDfrO2aCsPSn4hcogTqHsAQ31tHImy8TWtke37+dHAW9tPHuSBblUJ8Dkwstz4v5rWMEOJlbFMf3BBhSTBfck8+3GzWDPx9eK+Wt62r5hnjqRiY5++jV4d47aIuHMW6Omo5j1lC7narHS+xmUS32ES9muNXlZGdroMzLgfFyZY1ZoXILRsR5RiPBI+tq+tpi4+DsZsPEnTKxo5GVZkBCyKD4dQfIB+l62Ws+sCsORjo6Xbi0lWLSSaiqiv4d83Tco9+3OCiKvdqgcveInDchvSUfRrFzsHB2cH7bZQxZGPyVuMih15KbqbMCLqfI/A1lYWdm62X9xrppIMjiV5RsUbiwswutvDDwR9RXLOPYR5VisGTVkVy4UduCtEE0Crultu9cKgmUoKLKbdph7R1zi4WHgh1woo4lkcuwRQvJz6sbepP1J81xk5mZmlWzJZJWRAi82LcVHooveyj6AeK7AdNTOVGRXTYzxFMiyrQq6RnSSKgiBFVG4HFVNI4h2GEAAfg19jJx2nbFWSM5SqGKBgXCn0JW9wD9CRY18HHyFgXKaNxjMbByp4Fh6gNbiSPqAbj6iu/XdXTSmqjMrqXR71BRVJIBOJiKd33RwARTUPsYpjEEQ2Hb6OlKeoP2fD/06Uo0pRpSjSlGlKNKUaUrDkf8Ad77/AGNz/En0pUL/AC2v7BvFL8TNS/kptKUwXkd/qneEH4oj/wBLrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/r3yzpSpzaUo0pRpSjSlaefOYkNIGKIAbw5wARU7v4O3Y2w7jt8H+V6NKVGet1OrU5k5jahW4Krxz6TfTTxjX4plENHczKLC4k5Zy3YIoJLSMi4MJ1ljAKihh3MIjrqihhgUrCiopYkgAAEnyT4+p+tZ+w2mz20q5G0yJsmdI1jVpXZ2WNBZEBYkhFHhVHgDwKUGu2sCknfIizz9ItsHSbQFHuEvXpSOq9yGNRmAq067aqJRk8MS4MVCSCNcmKr3JxAqnTsOunJjmlx3jx39udkIV7X4k+jW+tj5tUppMrXYO5xczcY37zUxTo80HIp70asC8fMeU5jxyHkXvUcsn59huF3Hin2bkRcZrK11YMoCoKOKfXGaN5zVfVRbtnoUikJukEHMu6BYXZ2SKu6aBDCG4gADE5mzj6/q0m2sjT5ChV+1Rzmf0PBL+v1t+F62B1zo2X8vd8y9d0LEh1mnkeWcCeVjja/GFyv7jIKkhBbgJGHliAfxrAqPK47PMlexNmWtssdy2a3h5Xi5HtlJWbmch0mMqzOx2F/e0UGQx9Dn4dVwKCjJdUBMYpilERL1G+YN1xz0wdggifIN8ceSzoFDMX8WRgfHEmu7afGSy9Tn7T1LIfPxdOgTcMwSOPFyHmaKJcYluWTFIAGEir4uCbXsGinMxQPFWNyTjGq8hqpnDOLbIQZYtdK5E5ajarM45xPbnrZ9PJxbtVPclep1dRUcxbEvWsqUR2IIbAOBJnx6ZJcODKTI2Il91knlClInNzY/wDiq+VFWnC6nnfJk+v7HtNDlabprYH7KHI1WE80eXmwKViLrf8A3Z5SFmkNlBt5+tPPyYUrWeeLD5lUqlcs+Y/zOlT4pJHClvbVqff1CenGK61zr9oWDpTi4QrQHC/QAmWQKYmwAYwhIbcw7PTFYUkysbI4j+k3FipYHkrH6C1/Hr6VUfjsbHpPyWk20ysTR7zUNO5OwgMsSzxRsBBLCPV5L8Vv4ViDe4FNJWMmcQvLfwVIYslcxt5dng8ke3sqBmrOeyOwC6SsgapNLTF1GPM7TScO+pqguuQRSKAGUEN+3BhzNF1PWnCfIDLjW5ejSDmTxDBRf8rn/WrTsuu/Kn+QPdI+zY2paKXcljCeTRYjft4098wvO/EkLZ2VT5PhQbeGryly+Szz5dlqzOjgepTPvJENJe7cdsoZLjo2SNhN3bCxYXd69ilYuSZozrJJB7EdaTcFVFCFAxtvWw83ejZdVfPGMjc1u8EkgB9otbmSLEchYr6Xqy9a+KW6T8+Y3UW3eVF+3maPH2mHiO6f3BYOf7dVcOjGNi0c9i3EA+BWTSZrjpkby4My0TjBnM+JcbVWGteN7LkzJS726tsVzcqmxkblXJZ/Y1kG8/FpR02eNQUSW8L0LlFE4gADrnGk1WV1SfH0+T7GKitG0kl39sny6kt6ix4ixt58V8bjD7/oP8gNRuvkjS/3TsGVNBlxYmIFxzmxpyTHlRYgTE5eMTMGXndSHHk1SdkZvzuqTbiZDXSm1FxjuElnx+IXFaKimLqbk1cbUdyhG5gjHR2ExGJrqrSQT3cyksKLoTCUqBkNh1r3KHZIBgx5EcZxFJ/a44AJPtp4lHgjzfnZmsfS1q9hdfk+FNrL2jK1GZlLv5ol/vm5d2WNBl5AL4LjlG5ACftuUMPJPBLh/FOlyCxRnvhjx945XCOyIrcrPmyx50yZk7KL+i1h6wod2yjjeDXQq1YmSNrC+oDnwzWQT76CVborPCHMQCEKQpc3Z4Wz0GrxJ0k9yXJeaSSQopCPJGPtU+TH45C6WBP+lVrovZ+j/Lnet/qsjXjE1umx9diYeGmRMrZOPh5cgM00ZMS5Qu0R45KsyxkXuSxNV2buIFypz
anX2lIzOTcRXPCBOQjLK8bWZ+MZqwyrozC1QNpkZFRUGErC2Js4aEVXMk5fJiVciHScNUvY6LIgWPJx+U2DJj++JeLAEejKx+hBuPPkjzbzXpjpvytqNrLl6PcmHXdqw9z/AGtsJ5onYSAcoZYVQDkkkTI5C3SM3RnupqOlsqYxq6eQoWizDTD9icsDQxVJ51IxDN3JNF1m1Ulrkwaope8UUu1UXO1VKV4miQp1CABuoYqfH4H91HEw17nxdiQCR4UuP5ha9vW3rV/1e0GTGdDmZsLdsx1b3P6Sq7BSA0yY7E/0nBChwTGWJCt4tVl2QeT2Eb1wmmafV8OX21Z8qlEpuDJS62J58ZmP6hjKnv05ySyzELMYZpAUCflJN+rEMXaQld9xuKqhusQPbsrc63J680EOPLJskjSEu39RFjUgmUWUKjEkqCPNvU1520Pxx3LS/MMO22W2wcbpGVm5GxTHiX9nlT5c6mNMJw0jS5MSIqzyI105eFUW8VV40xjkHMl0h8c4sqUzebzYBclhqzANRdyT8WbVV45FJPcpCERbomMY5zFIHZuPbqmYmHlZ+QuJhxtJkveyqLk28mvTfYex6LqWol7B2XKhwtLBb3JpTxReRCi/18kgWFzXpZxZgOe8snF+CrPYS2TFcnbZWt5R5K8phxvXr81xkxfKkrheKTNkiwUuMalfDrNAXeN1RKg87wxgMnuIbbwtZL1DCxppOcLyEST5Htq/tg/b+3AtyHO4uQfBv9K/O/sveML/ACM7Lutbgft9nj4sUuHqdN+7lxjlsoMv96Ziwx3ONZ+Mbrdk4gWbxVqvmHcEmnOjD8fWq1enWNJuGUkrfW0GMZHI1W7TssyZHhU8iimz9rLxzDuxUTVREy6aixxEBHVz7T1teyYIiikMUi3dQAOLsQLe54vYfiPPmvM3wL81y/C/a32Owwl2OFKEglLOxmx4kZvcOLduAZr8SrfaQoFxXmz5YcX88cahqfl8WvMUtl7G3ICGxw9TRB1KP6liRA2RYk0/LN6o4kDu4FRjPskSpyPfsknUWdVLpKCnZqrbava6N4+tPkNNjZSofqVi+8cjxJ8WYD7rgFbivf8A8ed86D8pY+X824+ni1m50c2UD4VJ84jFf2kM6qBIGjJJiCyMkwVrm3m3i9cdf0V3lXZcqA2yBzC6oNpYW/Ck/I47roI44yPapaKj423NGFlNPoSL+KsK6jxB4sJ3KKQlIURENxvWZgSdP6fkI8qzyRsGib21HB2IAaxvchvN/WvKPXO14P8AkZ/kfqMuHBl1OHlwNFnwjKlY5WNCju8JeP2yoeICMoLKSCTYG1RY8lp1YuQmZ47JPINtn+1ZPwzD5Dt+G8wzUtOt8aWWsZQfKxV3q1kItGIxFqko2Tk1VYcxHBgbJAdMC/YCCET0DKyNrme9tBkPlQh3jlNwjK5s6t4AYgm62P5fQVsX/LrQ6boHXf7Z0STUY+i2csEGbgJwbKhlxlDwTRgOXiRkQLKGQEmxv95r0J4Rx7nCjWHOUjmHOhcxwV6yi+tWG4EtRY1f4mccLpLEZY2F2zOdS0CyUOQ3jltlDiX6fZs3Cx82CSdsyf3keUtGOPH20+iX/m/jXhns+36vtMTVw9d1f9tysXBWLMk91pf3mQLXybHxFfz9i+Bf8qf/AFn1UqNKUaUo0pRpSjSlJW9U6HyHSrbQbCeTTgbrXJerTSkLJOYaYTjJtkqweni5dkdN5GPyoLCKS6RgOkfYQ7Q11TwpkQvBJfg6kGxINiLGxHoaz9VscjT7LH22JwOXjTJKnNQ6ckIYBkPhluPKnwR4NRiyTgKXpWD8XQmEL3kqCV4qRo26lVMb04IhmdOh1WQLC42zLbZRCRlZWpzSyRRer7GXE3rB2gG0Zk4Dw4USYTyA4o5KvL/c4qbJIxuSp+p9avGl7Zj7PtGdk9nxcKVd+/szS+wL4ZnlXnk4cSlVSVATwH6a8cvPPizFcvceYe5g8cLvE3zOnO69ZDynauOC7OAjrghZMeQ60DeYnBsnMsIa7XarVGRi3abhkcDGWKmV0AH7sDDqHeatNxjw7fWuJM7Od3MNgGuoswiJAZlUgi318HzX6MfFXe8j463Ox+Ou5YsmL1fq2Lj4sWxDO0JjyGDwPmIjPBDLKrIVcW43KEi9qtH8tKuWLhx5WuLeTWF7XXMa0m0ZkfZG51WTMlQdTF0j8U1b/wDp7P1/EQUqFXlXcxGv2i7iF8aQDoHeKCcwAJ97N1uOTT9Xi2eE6xwNMXyTIt2Ea/YRHxBN/Urf0v8AxrRnzVmYfyN8653SOy482btINcMfSR4coSFsqX/5EcmX7zhAjAqs3AkMEFgfBGp4/YGyX5llaZMF+eNrzz5cWDs0ZIbTMoaXyhjnktfIxeso3aPY5Il3sI0Z2GVp0tMt/Z5ilVaosNyJiKmxSfOBg5PZIwpzmn6/BM4Ju6TOOIYByRYlSRb6W/8ATv7d2vR/DGY8ydVx9V8xbXW45RQmNka2BhKYWbGVZGaNZlRvc8hi/k+LkzG4OtMmRiONJbh3mPBc/wCUShP2Vm2PyFTtD3kY+u4ryjK0NPb15hY1qBCZYEgRgKKE/wBBDoSKJxARmNKMhRG+olgbqYZv92/u8vIYXYD/ANz9P5ela4+T30sz5uP8ia7axf5BGGIkYBjGvENlaJuMEjH/APNL+59pPPyxt4qeDXF3mAXTGOAk7pyUxlijMNKzQ4tudnWJ8fKTdFy9hxtLOxjMXxjexii/rL6QgxRB3IEDqK4ARLuHpm1xt/NjQCfJjizEm5S+2l1kjB8IL+QSLXNask3vxJrd5tm1mlzs/rmTrBFgjLn4T4mYUHLJYx3WQK/LhGfBW16ffkFgmVzotiFSHzXlTCpcVZdr+UnyeLpYIwuTI6C6wUxrfOpVL2hR5kD/AOlIet17fUjrPz8F832eE0sPtTB/sNudv5H/ABU/Wqp1HtUHVl2IyNbgbI5+ukxV/cpy/bM//wDkweDxnT+VvFvxqQqihQMoqPQkQBOoYdwKRMnacw9RtgKQhfhHsAA1IfnVRUE2X1Pp/Gov0OczrbuQlztLC/YMuXDFzj+LjsdpUd4E7klHMDB+1Rt6k9YowzqvuK2gmRwQiCbgy6apiFMQglNvGQPnTbB5VkgfTlAF4m7+4COVyPFvX6n1+lqu+1xer6/qONgzYm0x/kgZbNke+vt45xGVjFwja0gkN1NyoWwJBYEWk2qoRBJZdQelJuis4VNsJulJBMyqpgAO0wlTII7fDqSvYXPpVKVSzhF/USAP4k2FMTxv5L4d5aYyRzBgqxvbTQV7JZKklLSEFLVxwadqT4I2cbezJps0fAk2djsRXo7tUo9RRENYWu2WHtcb95gsWxyxW5BHlTY+D5/1+tWjuXS+w9B3Z672iFINsIY5SqyJIOEo5IeSErci9xe49D5p+dZ1VWjSlGlKNKUaUo0pRpStG7hVnU9EzZZmUbJRbZ63PCt1SlipMXnT0rv0hKJ1FmnT9j2Hs31EZOpkyNzjbdcrIjjx45FMCkCGXnazSD1LJb7a
lsfaRY+oydU2LjySZEkbCdgfdiCXusZ9AHv91/Wt5qXqJpOjMyfvUED7uP8A2OMP7R96++S9m+N74E/Y/c9PfC6Eg9fVvtt8GoL+6bD/AJH/AGf9jP8A2v8Aa+7+8uPa9zlb2eP6udvuve1vpU3/AGzX/wDHv7x++h/uf7n2v2lj7vDjf3uXpwv9trXvWDf5+Yq9RmJ6Chxn5VgRuZrFbLmF0KrtBFXsbFMuYEkVDH2KH+TrE7pu9p17rOVuNNi/vNlCFKQ/d993VT+gFvtBLG34Vl9N02s3/ZcbT7fJGHrpiweb7ftsjEfqIX7iAPJ+tKhoqouzZrrJdyuu0bLro9u6CyqBFFUe0AN9iOYS9vb2asOLI82LFPKvCV40Zl/8Syglf9CbeagMmNIcqWGJucSSMqt/5BWIDfh5Av8A61ka766KaXImGajk2UiJWwi+TViWrlp0MFE24vkljFOkR2v3ZljEaKlExCgO3rCHoHWs+8fFPWO/7HF2G994SY0bJaMheasbqHaxayHyoH4kVsjpPyj2XoWvydfpfZaLJkV7yAtwZfDFFuAC4NmP5A+tNvjfGiNFPewCsWiaaVd0D2osLCMa5ZT8s3SUW9u1RIrYAi5F13CbcVO023T29mqL0ToMXT33P/6P2GXja6T3MKPJMTpkzKpb9xhgJ/SlfisXLybW8+KvHee+SduTUH9/r8TJ2EfDMkxvdV8eFmC/t8slv6sS8mk4+Be/gXqRkDIPZaFi5ORinME+fM0nLuFeKFUdxi6gD1s3ChCkKdVP4RAADW9dPnZWy1WPsM3Gkw8yaIM8DkF4mPqjEWBI+ptWkNxh4uv2mRgYWRHmYcUpVJkBCSqPR1BuQp+gNbbUjUdRpSjSlGlK0aNagm1hfWpCORTsEmxbxj+TKZQVnDFoYDN25iicUgImYNwECgI/R1Dw6DTwbqbscMCru54ViklueTRobqpF7WB/AX/E1LS73bz6WHr007NpoJmljisOKyP4ZgbXuR+dvyrYyCLpxHvm7B34B+u0cIsnwplWBk6USORB33BtireHUEDdIjsbbbWdnRZM+FNBhyezlvEypJbl7blSFfj9eJsbfX0rCwpcaHMimzI/exElUvHfjzQEFk5D05C4v9PWk9jGRkMbS9bqtlkbVkGwXqQfkGwpxyfs2IWQQTWX8R0m/wCyIwUy/Yy7jucTfS1QdLLm9MODoN/lbDcbfaZEhOT7f9KJlUEhrH+jEf5B5+64FXzcxYXcRnb7Q4uBqNTrIIwMb3f6sqsxAK3H9aW/6z4+217mpea2RWuqNKUaUo0pRpSjSlGlKw5H/d77/Y3P8SfSlQv8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSrWdKVFbOHKuFwRlXAmM7FjDKc8xz/fYnGsLkusxVfc49qFvn05dWEi7i/krJGTLdWRThFjFBkyeiQolMcCgO4KUj75zmxzU+RDLjLXKpbck5Gbq1VO7e60pjyJjKB76FF1AoSg3q8VKUssstDlGQWj4BrLPm7ASqqJFA5AFSnN5F8k63x1i6KD2o3bJV6ypdEsf4uxbjhlEvrpebP7GlbLIIRw2CYr1ejI6ErkE7fPXsg+aNGyCPrH6jEKZSlLgHOtM5GYuhMq0hCcjouTdz0NK1+1R3sa11C1VObf1q3VC1RXfOCR0/W7BFuGrghVVEhMn1pnOmYpxUqPUI4QceZTejN10Fyl4QYvKYUVk1eg3x7ZZHpP3ZjdJtu3t9OlKnZpSjSlGlKNKU3t8dp9y0ZFUDve9FdVMDG6wT6RKQRD6kSmHf0/Q0pTYGMUhROcQKQhROcxjAUpSFDqMY5hHYpSlDcREezS9vPpXIBJsPJNQjV5BWPA7fJWSeVmRcYq4VuGZq5S+M0piiEnbJKJQVoUeMIqIv60M0fpO51xJpEKZ0gJmyJSG6zh2arp2kusE2VupYf7c+QqY5jBY2bwA9gbkn6jwPNbiXomv7s+u698ZYGxHcMTUS5G3TNkjiQyQhWd8YSFSsSoT9jWdiRYGn9yi1zuvYMTGw9JY8YVpvekVc2I3hnJOpaRxwDUe+Z0JRhui0s4uu0p3GyXR/hAZTMXZmWA4BiEIk/qhwSTHb0T6Br/jVH63L0pMHaDtcee+xbCI15x2QImXfw2Ty8tDx+i+b1Can4Z5fT3OuLyZyCSotswRU8f20Mfe60smFZrN1PaF1aRMBRJ3vZZtkdnWFO6dTTYCpbCBSn7BAa9BgbyTsi5ez9p9akTcOJ+1X5HgeB8+5x8Fh4/CtxbXt3xThfCsvXeinNxe65WdCcr3kPvTY/sgZEf7mO0ZxWmF0x3Jb1JXzUlco5HzbRHmXrWx44Ncr1vH9UrEriBOn2GNXyhkezTLxBpca8hGyTc5awhCoLCsVYDf6UkmIesI7BLZuXscdp51xBNDEimLiw9x2J+5QCPtt+P1rXnW+v8ATt1DqtZP2BtZsM/KmTO9+Jxh4kMaloJS6Ee8ZCOJUj7GI/1ru504CxXkzLtEzLyFwlE0bHCuF1YBDOUXYDK5AiuQOS2wV2kUK3UODYSKtuQqrtZDwkm4ILNisU/WbutyjVeyavCzM6PP2mMsWJ+34+8G+8TSDiiOgB5cTazHwpv9K318L957N13qud1Lom5lzewjbiU654rYr6zEPu5GTBkyOggM6hg8KnnICthysaW/G7DXOpvx3oPEHLzZPF1SJiG0w8ryQxrfa+OVcd2GKvLhTHlWq8LFm9lPWrmlt24LvgSExe+OU5+vfWTqcDsf9qi0efaGAQMDPG6+4jBzwVQPB+wC5/O30qG+Qe2/Cz99zvlXqjHZbQ7WGRNTl40v7LKikxgMqaaR/vVhkFuMd7HiCBxpibLxw4o8gc15xn5K4X/jhO8Lsq0RXO2X1JWF9mZ+tqEREOIe+3dzOpumjBVmrCnTEiKZUDqvTn7sTGDaNm1Ol2ewyZmeXEl186e9LcWmawKu9/S1vwt5Pirtr/kD5O6N0/TYOPiYPYMLt+syRrcAJJz1kBeQPjY4j4swYSA3JLBYwvKwNS9qWWvL8yvc8+XXCcPRsy8hJrG0zS7jjNFrIMbVl6rYfbNXjCqxNUsaKdef18r1m1I1fNmnh1TiUwGOAanYc7rGbkZORr1jyNo0RR47ENKsViFCt4K3AAYCxrVO06v86dY0+j03cJc3U9Eh2EeRBlllaHBmziVaZ5oj7qy8WcvG78wLiwNRLyplTH+EL1x3uljxTJ8d6TmvF88+zzxlyAwgq/x3g2cg6eObPY7MjGxRmOQORcO0jUjMIcqAqPmySZQAB2DUJm52LrsnFyJYDi4+RCTNjuFWAXvyZrCzziw4pbyLVtHrPWd53LS7/T6/aR7/AHOn2Ua63b4rSy7WRkCrDFEXfljap2c+5kFrROWJ8XqpLDfH3mTn7NpcW40zvcW2JM9x+SMn1+/VqfdO6PA0GSk5hnGPrJVmbwBpMvbGcem0LWlzR7wjRQgAQUi7apGBq99sth+zxMmT9jkiSRXUngqEkAsoP2FgLe39pt+VepO297+JOj9P/wC
S9h0mI3adHJiYcuNNEq5EmUixs6xTMv8A8iOBnMhy1EsZkBJPI1NPy+Mu0HjpcbvgDKHJ5jXbnehVxHWOOERVbHmikYmzYg7RqMZfo3IJ2sxVi+03ZTKu4kiiTZosqdFUelP1Z/q+fi6rIk1eZmBciS8SwBWlSOW/EOHsV8nyVBsD4PpWoPnfqu87/qcLvXW+uPPqMK2dNtZJotfkZuvKmd8Z8Xkk32LZUnILyKA6i7ee+8chPMTMtnrD6FvkHEJVYqMxdWLrRoTEDTHdhcUwpbHaMj1xzYXTGHlZS/QiSkc7j2PfN4Z0oBvVUL0D9ZG07TfJwVc+3GojV0EXBuH3NIvIgEut1IFwh/OvnTdE+BOGk7W+JGMvKlfMmx8mTObKiGR/ShxJRErSImNIRKksnFshAR5XyKqbne+HOMs2DR2kZlvNmA4iotpB7CvsgxqldfchLhU02Vsyc+jY1IkVJJ1Bw7TSK2j1W4O1o8UxVBLbVKyMnQ4ew/bgT5GsVASC44md1+6QgeDwP0Frketq9M6jSfLXY+nf3mWTVabvEuUVWRcVxKurx5i0OGruS6GcKWLyqxRZQ3HlSnv3OxDC8Xe8AcYSVG1YasOGoLDVrmbBTgZ1DIr0El3tqySwoK3hnMVaZJ48SQTePHDhYpmPe7D1E6e3J7INekmr0/tyYDY6xMWX7XPqzhPoxuBcknxf8KjtH8KP2/Jwu8/I5ysXtsG2kz4Ein5T4q+FhxGyRdXhRVLGONFUiTj4sb3i+U/beEeGeFDzknXau5xzKJ2CJxtm2yzUhJ5Ety9vauGsdEHISLYqrQ8DPu587hi2btUzpNF9nCioEA2tjdKm67r+vHbRIYn5COZiS7c/Qeg8BixIAHgHyTXjT/J/VfMXbvmFPj3YZK7DGMD5evijVMWAQEM7+XYB5YhEFkdnILreNVvar3Fm8dLM0yPWbGTYOStnQN5Bm3fNVS7EXbqKNXSaqQmLuBg6i9RR+gOtkFY3WzAFPBsfP8K8UrJkYspaJ3jnUkXUlWHqD9wI9fI8GxFRvQruGeGdFz9mKVnbjH0uas07m/KElOyc3dxhnbsqCEiFWh0UV3sdCIgYoIx7Uhik37OwOyJEWv0GNk58jSDHZzNISS9ibX4jyQPwUVsB8/t3y5utH1PGhxJNvDjR67DSNI8f3FW5T3pCQrSHzylci/1ryb80vNbgc8ZIt1jxFxyx1Bt7LUU8aWC/ZOaSlsvVzpkJNozFXBOPaTEXBVFKMlGCD9AqSa7jvQBNdRQhekdJ9g7tFscuSbAxYlDp7bPICzuitdfAIVbEch6m/g3FfqD8Q/4x5vSevYuB2rsGfNJj5Ry4sbDKQY+PPJGY5rs0byTl0ZonJKrxuUVSb1YD5KfICh5lxhkfivyWsDjLNjv+RZGy0ShZGr8lcoCTrMRW2s5aBUkJBi7gWqLaaJ4gjZdUogqG6RA7A1Z/j7aY+fhzaXbuZ5ZZSyJICylQoZvJBHr5tf8AhWi/8wei7rqPZNf8l/HcC6vX4OvSHJycSRceVJnlaOEBUZZGLR/aWVT4/Ua9KEFCw1Xgoqr1qKj6/WoJmjHQkBDNEI6HiGDcvSgzjo9qRNs0bJF7ClIUA1tqOOOGNYYQFhUWCgAAD8gK/PXNzMvZZsuy2Msk+xncvJLIxeR2Pqzs1yxP1JNJ60ZHoNJmaVXbfcICtz2SJxWtUCHl36bR/cbAg3K7WhoBA/rP5BJscpxTL29Ih9HXVNl4uPJHDPIqSzNxQE2Lt62H4ms7W9f3m4xMzP1WJPkYOvhE2TIillgiJ4iSU/yqSCLn8KWmsioim4nMwYrrOSKVh6w5Aq8NlXI8bMzNBx9ISRELRb4qukOpOyEFGiUTvWsSmmYyxgEOkCj6dh1jvl4seSmHJIq5UgJVCfuYD1IH5VM4vXd7m6bJ7FiYk8mhw3RJ8hVvFE0nhFdvoWJFv4inH1kVDUaUo0pRpSjSlcCAGASmABKYBKYogAgYpgEDFEB3ASmAdhAewQ0p6efrVc/PLD/HiJhML8usuXabwdAcFb8TLqdtxtS2MnKOoKaUb1mYojtnFxD2aJVLS9mUyvkY5PvFu8MY4CG5i17e4mvSOHbZTtAmDJ7nJFubHwVIAvxN/PHyfrW5PirsPcMnJ2Xx91/Gj2mZ2nE/aGLImKqHQGVJwzOE92IIeBkNhYAH6HtzzZcOYanafzkyDn6+VLBE5i+CxFXsINq26mMIWax5qdFkqDdrDRIiJeSnvC5VkEUlDqNwIghuCokEDAZny4eG6bvIyHTBaMRiPjeMtJ5RioF7+fw8D1r56pg9k7JjZPxfqNTi5Haos58uTNMipmxx4Q4zwxzu4X2wFYqA33E+AfFn34fY7yrjDAVYqGcIbBcLk5CTtLuxM+OtVGo4oUZyU69dwKkTCKtGaxJBSEVRB+c5PsjnrENw21najHy8bAWHNEC5F2uIRxSxJtYWH09fzqq/Im40O87ZPsery7WXRlIhG2wl93KBVFDhnuw4hwfbAPhbU4uTcFYfzHRS4yyVj2uWeglnoS0FqijQ8ZEEsFckyTULKEbwyscJHDGUTBYNh6Tm36wMAiA5GTg4mZB+2yY1aDkG4+guDcHxb0PmobSdp7F1zaf3rS5c0G2MTxe7fk/tyLwdbuG8FfH4gehFOuAAAAUA2ApSlAPoFKAFKAfSAA1l1AepufWudKV1rJJuEV26xAURcorN10x3AFEHCZkVkxEBAQA6ZxAdu3t0IBFj6GuVYowdfDKQR/EG4/8AWmhwVgDD/GbHrbFOC6Qxx7j5nMzlha1qOey8g2RmLK9GRm3oOZuQk5Ax3zw3WJRVEhOwpAKUADWJg4GJrccYuEgjgBJsCT5Pk+pJ9f8ApVg7R2vsPdNu2+7Pktl7Zo0QyFUUlIxxQWjVV8D62ufU3NPFtv2bbgICAgO2wgIbCA79ggIayj6VXvTzWFHxkZEt/BREbGxLQqii3gopizjmoLLG61lvCsUkUSrLn9Y5unqObtHfXCqqCygBfwAt/wBvFdk08+Q/u5DvI9gLuxY2HoLtc2HoBewrN/xB9ABENx2+gG+46+q66NKVqJCwV+IexEZLT0JFSVgcqMq/HSctHx8hPPEU++WZwrJ24RcyztFH1zptyqHKXtENtfLSRoQrMoZjYAkAn8hf1P5CsiHDy8iKSbGilkhhUNIyozKik2DOwBCAnwCxAv4rb6+qx6NKUaUo0pRpSk7HktQWKdUk14c9WOix920GiaxZdJcCf9ojKKHDuFEzn/euntAPTqCwk7IN5mNnvinrpWP9qqBvfVrf1fdJ+0i/6LegqczG66dNhrgJkjsAeT9yzlfZK3/pCID7gQL87/WkVEqNJTL9rVPCzzJ/Wa5FxiUws/X93pdpJGI7ORpHCiRv41ooOx1AOcdt+wNVTWyY2w+TtlI+LmxZuvwYYhO0jftpklIchIrBfcQ+CwYn6WFWnZR5Gv8AjXXImVhy4efmyymFY1/cwvFdAXk5cvbceQvED0NzTsdoD2CID/iH9m2tkgkGtc+D/CuP/LvriuaNKU
aUpOM6tGMbNMWtFSQGUnGbJi9TVfLKxxEWH7wZoxN9hbKm/wAsxe02oLF69gYe/wArskLTf3DMijjkBkYxBY/0lIz4RvxI9anMrsGfmaLF67KsI1+JLJIhEYEhaT9XOT1ZfwB9KUf/AEft6nag6NKUmrEnbjqwQ1VeGQSJMIGsgS6ayii0CBR8QlFiiAgnICbbpE2xdQG8TszyYf8Axx8VIhlKcr3gxLY9vuEXH0k/AnxU9pH62keYOwplPIcVv23skALkX+0y39Y7eoHmlIYdgMYoCbYDGKUPqjbAIgUPg3N6NTzGwJUXt6D8fyqCUXIU+PI8/wD1rSV2VfTUUjISUG9rrtVZ0meKkFEVXSREHCiKSxjoCZMSOUyAoX4ek2ojRbHM2utXNz8SXByWdwYZCpcBWKhiV8WcDkPyNS2812Jqti2Fg5cWbjqqESxghCWUEqA3m6ElT+YreamKiKNKVnxr48c9buyD0giqUygB2AZIB+yAIb7dpN/paXIB82H1riwJ9PNPhFS0dNMkpGLeNnzNbqAi7VdFyl1kHpUTFRA6iYKJmDYxd9yj2Drox8rGy4/dxJI5YbkckYMtx4IupIuD4Pn1rvyMbJxJPZyo5IpbA8XUq1j5BswBsR5Bt5FbHXfXTRpSjSlGlKNKUaUrDkf93vv9jc/xJ9KVC/y2v7BvFL8TNS/kptKUwXkd/qnOEP4oj/0us+lKlrmPiNjfOFrSuFrufIKAk0YtvElZYx5IZuxRXTN2qiqiaytax5ea7BqyBhVEDuTICsoAABjCABspUT+VXHLkinF8RKRxZpdOyRT+PWcKlm2dm+QnI/IyN5lFKgNnRSqpLHMUTK0/YVpROzGUCRkHwi2BEqQJGJsJVKYDP/loZUyHnbkBd6ZWsEjH8upjjVb7hl63SE0fMnGez4LVgizQYgFpV3AWZKeZQRTRyxJCBO0eLKmcFXRHu9KVL7kHhnk7kC4YkzVS4bES+SOLWebnY8S1KcuVmja/lXDN7xTJY2sDO8WVGpyi9FyEc9icOkFGkfKMkvBJlHfvj9ClO5w+4/2PCWDZCm5Re1ywXvI1+yvlfKCNbI8UpzezZiuM1bZus10ZNNF+9r0G3liR6azhNNV0VEVjEIKnQClR1wVgLCvH/wAxXKkHhPGFNxdEWThri2esEdTIVtCtZiZJm/LLUkm/RalKRd4Vt6gHEN+ns0pVoelKNKUaUrqXVK3RVXPuJUUzqGAB23AhRMIdvZuO2lKYB++XkXSrtwcxzqGHp6vQRPcehMofAUgDpSm4yhNSNcxtfJ6IpErkuUianOPGWO4NVJCYu6xGCxQq8YsuQ6KTyZKYUSGMAgAm9GsbMkaHEklSMzOEJ4D1fx+kfmfSprreJj5/YcHCysyLXY0uVGrZUgJjxxyH9ZwPJWP9RA8+Kr04vwHIGuVCQvquDmuIeOdXxPJKYl4GLRbCZysyyMxlpKUfPJ61zjbwyErNOidUaBDCTu3ZespDEERq+nj2kWOcn9sINUkB9rDsDIJASTyYjwT/AC/kfxrfPyRm9F2G1j0i7ptt37J2afveyB2TCbEZERRHDGblI18S3F7obEg1Y/SZ6VtFOq1knKtK0eZnoKNlpWmTirdeZqz963Is5gpRZoItVX8aoYU1DJ+qJg7NvRq148rzQJNIhjkZQSp9VJ+ht9RXn7cYWNrdtk6/DyY83EgndEyIwRHMqmwkQN9wVx5F/NKfXdUbWM9apv2T1gsZUqL9m7YrGRVOisVF43UbKmRWJsdFYqaoiU5e0pthDtDXDKGUofQgj/rXZDK0EyTpbmjhhcXF1IIuPQi48j6iq9s3On/l08J3khgLG8rnOKxJMMpR3V8j2aatNgUrM9ZjurHMjLA0ezEq9hXD8p26ZU9kEtzCIFTEdVfYlurdfLayFsmOBrlXYs3FmuxvYkkE3HjwP4Gt79Nig+fPmFMfvGwi02TtImRZsSGOGITRw8Yo+HJY0WQKQxJ+4+PVhTS8eMrcyq9D8a7nygzNgqv13kHk27un1OuCrFG7BXrbHIyGFMZYmmK8knDzsw3bB4l2LjZwCSvdHExyDrB1ebvo0w8jcZGMsWVK91a3PiwvFHEV+0kepv5sbHzVn771n4kz8rsOo+N9Rup9hotdjqs8AY4/uwOU2GZmpKTJHGT9icft5LyFgalzyCxA5sd0oGULDkBpW8BYth8hzvInEDunoztfzbX1YIqjQ1sRKmcr4KkRgdwkVdBycw7d2AGDfU5s8EzZEWZLKE1kCu08RW6yra/3fjxtcXB/KtXdF7Wmv1Gd1vAwXyO8bObFj1ecs5jl18okswgJ8r75YISrIB6tcVU9jI+S63l7x5JnBJMx83KPk9zwx5VwdUcN7DWabWU2DnHGLHtKCnx0bExzertjlfPHCQLCUxSnOuKZSjSsQ5cOdzDY377YxSftckLZlRbcIynEAAKPJPn+NenuxDr2w6r7LQ7s9T6bm4Y3+mknBimnmLDLzVyPfd3czEGONW43uQE5E01+VuNGTKBlTF01zP4+5L5twbyuz2V+ReaKrcbIyxLRbTIBIwL9H3FIyRrw1+h0eKSeKnbN2T1cxutMCjsQcLN1OXi5sMnYMWbYRlWlnlVmESMbqfstx4ogv4AJ+lWTrPyH13d9Z2WH8Rb7XdNzEniwtVr5oImzsmFOEqn9yWMvu5OQ7RgM0kajw1/WpK03I2J4PibVsscZI6G8urMfNPJdfr2MCT0G4yC2vzqiunsXVgWg2bUWERV7jBtRD2gkzRTZJrCdQTKG3GWgy8KPSJm6gLqs7YSqsfJefPhcL9o8BWUfqtYX8+a15t9B2jM+Ucnq/wAiyTd+6n1DXSy5hikGK2MuSFea0jNykmgkP+0XYyFeKgKLCUHFvEOMcZYey3yZglsJ3bNOYo2TtGV7DVbHGNMASWYqojOxyzKkysw1boUKHlbE8FGRX7wh/FKnUEQEC6mdPg4eJgT7eI48mwnBaRlYCEyrcEIT+hSxsx/E3rW/yV2vsnYu16v46zRuMPqGpkSHCinidtmmDMY3DZCRknJkSJeUS2I4KFH1NeQvmFf8zRFkkuO2Rcc1DENZx3cZy6UXFlVXLYoXHyOTI5tKz6VKvhnTx1a6pdlVk5AzpRd0VVUpQRUImQSDovfZWekp1eVCkEETl0jX7gnuC7cHuSyv+q9zfxbxX6qfFGi6jla+Pv2g2GVtdjn4kePkZkwMUmUcR2SI5GNZVhnxwDEECpZSealiCIR7iAFLv2F7Ch8AAPaOwfBuOq9W5Pz+prjXNK9Vflf2O28V+B2Lbo2h8QyyXKLl5BQZm+Sb21rLVKqvkm1XF/Fg6By0k7qivEGMxiigCioABjFA22tz9Oln0vWoZwsDfvM4D+o/H7T9txfwXFvC+p/jX5l/5IYGq+TPmzZ6iSXaxP1vqskl8TGaVjMpM3B+NmTHIkAkmPhfQEirMadXpPJtG5X1PiLd8x8c8ohyUde8OSM11Z7bGB7EwdRy9qUxnAWB44bOMfz8O2M2aHR7tJE5xMUvYGrfjxPl42bDo5J8X
M/d/dJMpYXFuXtq38jDwLWtevOu1z8frm56xtPlPD1HYOt/8eX2sTXzLC3tOriH95LEoYZUUh5uGuWAsTSO8wfzLMCcR6VbMVyzmHytnyUpSka3xSeMTmoAHMzEHQSdZRKZZNnE16UKJzqMxFR2qmoUAR6BE4Y/aO3azR474chWbZtHb27XHkf+5+Cn8PW30qX+CP8AHju/ynuMXsuKsur6PFmczm8zHLaNwSMPwWeVPAEniNSD93IWrwzzMiSWl5WVSjIyESk5F7IJwsIis1hokjtwouEdEtnC7pdvHM+vu0SHVUMRMoB1DtrznI4kdpAoUEk2HgD8h+AH081+0GJA2LixYzSyTNHGqmSQgyPxAHNyAAXa12IABJ9Kv28g3Ot+j8yZG49tFyyVcsuN7Rc6a3kIk8nH0+/xItwJLKPktnMFBTibgqUh0djpRNEvYcQEdmfGeyykz5dWPuieJnQEXCuLeb+qg/zfj4rw5/nB0vR5HUtf3yVfb2GPsIceco/Bp8Z7/YFPiSSOxMV/0AsfQV6pKOwzE4xAwjsmTdRZ5qc1iWZTdko0c7NTo20uiSCMRLwsbK/6Us0jSqNlTJKiHeqJmDsAQ1ufHXOOCEzGjGwKEFkB4BvNioP0Hi4/KvzO3M/U07XJP12HKfqC5KNHFkuv7h4V4l45HT7Qz2dQV/SCD6ikvjDDs8woGKGPI2frHIHMuMXT2ZaZdkqNEwrpOyOnDkqVgrUV3SoVeQSiTpNTKNjEOcqW4j266cPAkXFhTbOmVnwkn3SgU8j/ADKPPHxYXHrUl2PtmDPvNpN0CDJ0XUdkqxtgpkvIDEALxTPce8pcF7OCBe1P2H7NtSdUio81SmvbrlSxZGzDgzGkRbMV2WZq/HnJ6J4203hzjWbYkGUlUZVdiWSpC806MZJyxRVAqpfqtw7RjYIpJ8t8jOx4lkhcrDILMxjPqb2utz6rV322fharr+Lp+rbfPmwthjJLs8Vg0MCZSMeMfENxyAi2KyMPB9LfSQ2pKqRRpSjSlGwj2B6R7A/b0pUTZ7nRxNqSGXHdzzVV6TH4MyDFYpyZK3AkjXouDyHORB5yHqzV9IM00JqRko5I50fCCqU4kMXfcNRT7vVRCZpplRYJAjlrgBiLhbkWJIHi1X/F+Lu/Z766PXa2fJl2mI2VjLFxkZ8dHEbykK10VWIB52PkeKdDAOfMV8n8WV3NGE7Etbcc2taXbwU4rEycIs8WgpR1CyhBi5hs0kUBbyLNQnrpgBgL1BuAhrJ1+fi7PFXMwm5473sbEeht6Gx9ag+29U33R97N1rssIx9xAELpzVwA6h1+5CVN1IPg+PQ+a1luw1KWPJ7zJfxk3BSHNh6x4zLg6QVYuMPS03LuVn7C/wA1EqtlXa1nj1hI3IqAiBEC9gAYO34mwnkyTk+4/D2Sntn/AGyT5DkevIen8K7td2SDC0a6T9ljjI/uMeT+9UEZaIgCtAj3AETeWItct+VUv5r4w5Nwf5OULhDPNGmeReZaLlyvWGr03B1tvjlrYLdIZaeyuP039qdsXVpZ1aEYSvVIGFIiLcUyFKZMoFUCn5msyMLqC4WejZGYkoIWMsbsXJW5P3WAPn6C3i3rXpPrXd9J2j/IuXs/UsqLT9cytfIkk2bFApSJcQLPaJWERkdkATySwJJDeVN/VYczrysVp5aIttB2Z1XoRzY4Vk7GQZQ88vGtlJeLaSHSXxzWPkDKIprbB3pSAb4dXyIyNErSgLKVHIA3ANvIv+R8V5Nzo8WLOniwHaXBWZxG7DizxhiEcr/KWWxI+hNq3e2vusWmrzfO5YrGJL7YMFUaDyZl+KgxdUCg2SdCswVonfGtE/Z0nPGOkSMbgxUWVA4nIBjpgXqL1b6xc18qLFeTBRZMsC6qTYMbjwT9PFTvWcTQZ2/xcTtGVLhdfkltPPGnuPEnFvuVACWPLiPQ2BvY2tS2qruwP6tWX1timsDbHtdg3lpgmDz2iwhLI6i2q89DsZDYPHs4uVUWQSX2DvU0wP8ADrviMjRK0oCylRyA82awuL/UA+hqNz48SHOni18jS4CzOsTsOLPGGIR2X+UsliV+hNq32vusSmdvtjzTFZHw5DY+xzWrXjOxy9hb5pucta/Ys1jmHZxZF66/rcB4dT3pcS8qIoqp9ZO5IAD8O4Yc8mauTCmPGrYzE+4xaxQW8ED63Pj8qseqw+tT6bY5O3zZsfdwxxnChSLmmQ5a0iySXHtBE8g2PI+K7b/kW51DIOHKfA4aud/rmSpqdjLhkWAdxjau4eYxMUZ+xm7eg+OR69YzbsPCola7nKr2jv2APORkTxTwwxwvJHIxDOLWjsLgsD639BauNTptbsdRsdjlbLGxM3CiRoceQMZMxnfiUiI8KUH3Ny8W/wCtanHfHmh4xy3nPNFckLo5t3ISRq0ne2c9a5CZq7BzToxeJiS02uuv9DqyCrZwYXJENwXPsI7bAGvnHwIMbLnzIy5myCpa7EqOIsOI+nj1H1rv3Hb9rvOvavreauMuv1CSrAUiVJWEzBm96QeZSCPtJ9B/GtpNYQps9m6j5/fPbcS84+pVooUEwZ2qVaUtxCW1wRzKLzVPRUCIl5lJQn+jO1SiogXbpHsLt9PhQvmpnkv78aFALnjZvW6+hP4GujG7PscXrGV1KJcc6vLyYp3YxKZg8QsoSY/eiEfqUeG+vqbu/rLqu0z+RcA4fyzdsRZGyFSmFmuuB7G/tuJZ507kkF6dYpRqkyfyTNBk8btXp127dMOhyRZMpiFMBQMADrDyMDEyposnIQNPAxZDc/aTbzYEX/1uPrVh1HbOw6DWbDT6jJaHW7WFYspAqESxqSQpLAlbXPlCpIJF7Gng9Osyq9RpSk68tcFH2SIqLt6ZKfnmjx/FsvDrmBw1Ybi6UFwUgoJCnt2AYQEfg1BZfY9Phb7F6zkyld1mRPJEnFiGSP8AWeVuItb0JufpU3i9d2+bosnsuNEG02HIkcr8lHF5P0DiTyN7+oFh9aUWp2oSjSlaOWln8c7g2zOCfS6UrIizevGh0SIwbcEhU9ovgVHqUbicOnYnbv8A9MRstjmYOThwYuHNkxZE/CR0KhcdeN/ckv5K38WFS2u12Hm42XPlZkONJjwc0RwxbIa9vbjt6Nbzc+KYpeiwVjnrpYMd3y4w1wg5aRTnWyDp06ilJo7Y7hGLGOfAmgdmKyYCAJiYu47bh2a09N0/Ubzc7Xd9G3O0xO0YeTIMhVd3hM5QsIfakspTkPAUkX8XrbsPbtvo9PqtJ3bT6vK6zmY0Zx2ZFWUQBgrS+6l2D8T6sAfrS7w1YFJqlMmUm+eObVAmWjbY0lXKbmajpUHDg3cyfdgAInVSL1JkHcQT2AREQ1cPivdSbXqcWJsZpZOx4RaLMSZg08U3Jvtlt+kkeVB8hbDyRVR+UNMms7TLl6+GKPruYBJhvEpWCWHiovFf9QB8MR4LXIFqdbWyK11RpSjSlaU1giC2BKrmdiE4rGHmE2Xcq7Gj01e4Mv3/AEdyGyoCHT1dX0tRLbzWJul680n/AOl2xzOE4t5j
DcS3K3H1+l71KrpNm2mbsCx//ohZxCXuviQjkF435en1tase0VeNt8WWIlVJBJqR+xkQPGvVo9z4iPV75Aoro+uZEx+w5PQYNdPYuvYHZ9eNZsmnXHE0ct4pDG/KNuSjkvniT+ofWu7r/YM7rWedjrlhbIMMkdpYxIvGQcW+1vHID9J+hpRanT+NQnp4o7fgAfo64Hn0oTb1rRyBbEMxBGi1IskGU7z3jTdkXNIKJ9yHgfZZ09kyGBftU6/8n0aiM1d6dnhnXtjLpwX/AHIcMZSOP9P2iPAIb9V/p6VLYbaQazLGeuQduQn7YoQIweX9T3QfJ+39Nvr61vNS9RNGlKbiwXSyREpLMI/HNhsDWPZxLlpKMHDdNtKrSDlJF20alVT3KrFJnFRXcR3KUdttUTdds32r2OThYOizs3HhihZJY2ULM0rhXRAR4aEEs1ybhT4FXjTdW0Wz1+NmZu8wsLImlmV4pFYtCsaFkdyD5ExAVbAWJFYuV7fUa3WHEfbXL9uWzMn0fHs4zxichIOwQKcrJs7ZpqCzWWVMRMDm7NzfQ31jfI/Z+saPr74PZZJkXYRSRxJFzEkjhQfbR0B9tiSF5Hxc/wAayPjrrfZd5v483rccLtgSxySPLwMcacrF2RyOagAsVHnx/CkxxnVyDieFioafhpJ/XbXIOXLWJSIg3Ux4kczhy4eTh1ClUdryyhyB0FEekSgID622qB8O6buPS9Ti67bY0z63YTu6RBVU65fuYtkMbMzTG3gXC2uD91qv3y9uOody22TsNXkwpstfAiNKSzDYN9qhccD7UEQv5NiwNiPF6mmleI1Z4m37ldNFQwF8UqJClLv6BOnuIgXq7BHfs16DrQNLFNVJYoHRUIqQfQZMxTl/xlEQ30pXZpSjSlGlKNKVhyP+733+xuf4k+lKhf5bX9g3il+JmpfyU2lKYLyO/wBU7wg/FEf+l1n0pVrOlKNKUaUo0pRpSoMxX6yq8/3H8X/175Z0pU5tKUaUo0pXS4MmRBYywbpFSUMoAgJgFMCiJw6Q7R3LpSo8KCUVFBT/AHsTnEnZtsQTCJA2+D1R0pXBCicxSAIB1GAu4jsAbjtuI/QDQVwTYXpksN51rmaqfartEVq+U6KqFwuFOkmmQ6w6rEs4cUk5SSc3HRyxllnteeEETtHJf38pR2LuG2o/X7GLYQPkIksaI7qea8T9nqQPqp9QR61ce29K2HUNtjabKyMLLycrEgnQ4syzIBkfojdxYLKp8On8pPrSsxdlCj5no0JknG8yNgpljB6MPLGYP4szr2c/cxjzdjKNmj5EUXrRQmx0y9XTuG4CA678PMxthjLl4jcoH9DYi9iQfUA/SovsnW9z1HdTde7BEINvj8fcTkr8eah1+5Cym6sD4Jt6HzS6ci4TauVGqJV3RGzhRogop3KbhyRE5m6B1RAe6TWWApTH2HpAd/g1kG4BI9bf+v0qGjEbSKsh4xlgCQLkC/kgfUgXNvr6U3uJprJlhoMJL5fpUNjzILo8kE5Ua/YS2mKjCISTpCMO2myEIV2Z7GJpLnAN+7OoJN+zWLgyZcuKr50axZRvdVbkB5NvP5ip3tGH17A3k2L1TMmz9CoT255YvZdyUUveO548XLKPxABpX2KAh7ZX5yrWJinKV+yxEjAzkasdVNKQiJZqqxkWSqiB0lyJumi5iCJDFMAD2CA6yJY454mglAaJ1KkfiD4I/wBaisDOy9XnQ7LAcx52PKssbgAlXQhkYAgi6sARcEVDG948xpx+h7DkzME/VkuKmDaVTVcQY/dY5CwyOAbHAmCEc3utTpEZiyycq/NIpkQApDqoCO+4gA71/JxcTVxvl5zJ/ZceNfaT2+RhZfBdW8sSb+Pw/hW3dLvuxd6y8frvU4Mk/Ju5zJxnZK5XtLs4pf6i400d44kReBLXIDenqaZXN/Gi1SeAMLZHpuWsmZ+tmAHymZsewWRshxeNYHN8pZ5WMm68lli0rJxaTGCrkO4MCSCxkk1kN0FgAT9kdsNTM+sx8vHnmypsU+8gkcIspYgr7reLKo9B6EeDVv6d8h6zG7zt+v7bV67RavexjAypMXFfLk1yQo8cpwoRzLSSyC7MoJVrOhNqm5dWeYpnFtQlMXQ2J67l8haW+ctrm0UmqhWWL0GK1+ia8/hS+J75JkougwXbnKir0lMbcohqxZC5z4cb4awJnfYfuF1UG3MKR+XgEeK07p5up4nZMvG7JLtMjqh/cKDjsI55mXkMZ5Vk8WLBXlVhyW5A80yHNvk7kLjPF4te1DF1PudbvN2bVe/3PJN1Y0bHdAhXqrRql7YmXaxSHmJ4XKhGKJyKJLmRMQQEwgUY7sO4ytQkL48KPFJIFd5HCRoPA8k/Vr/aPI8Grj8O/HGi+Q8jZRbXZZeJscLDM2Nj4mO2TlZUigsfbjUf7cdgZGBDLyBHgE1FTmjw+PmzlDxmtcJJcjUGkiKTWpX7DsrVm2KuLqNfYoyElYV4lxHmF0llNsYrMVElUwIJesnUAgTUN2DRHYbjDmRsoKfCvEV9vHt5LEW8+4PHj/1rZnxF8rL07427FrMuPr7SJcz42ekxzNwZWKpEHD/acI/1LMpvext61KuxcduOz645Cw5cbgi9oGXsQA2ccOm76Pr1N936xMHnLTkSr16AGKm05SalSlF+5TWIUygdo9o9MzLqtU88uBPIDjTwW/a3CpxU3Z1VbG7H1Nx5rWeB33v0GpwO26nEKbzVbW43xV5Z/dmj9uHFmll5x8I0v7aFTYH0/HwrcmMlQ2XM85OvtYhFaxT5OxmjKTWFnb14auUqrsmdVqkOCj909cpi1g4ZETpiqoCapzABjB2j5x2+XHnbObJhXhAXsi3vxVQFUeSfoB4+lftN8ddey+q9I1uk2UwydtHj88iYKq+7kTM000llCg3kkaxsCQASB6Uxeo6rpQO+w9JTHN/kkKG5jm/ySlAO0TGHsANcfwrkfn4FeyTAOIFOHPA/i6rmF3gieokLYbZyFvUBmaKOws6NoslOG80ClYkUmAOzjMnxkk0M2M4cEAQOYekuxRAd96zAbQ9bw/35xmxVZp3EoswZk5okV/AkBFrn0PpX5Kd57Wvyz82dlTqa7uHczY8GrxpMB+UJhin/AG+TkZ3D7nw3Vg4VD5A8nz4q85befFnzNsW2q/H2Dd8d64s1/wC3J/2s0smRZYzlsKbiPbTAsUoyvxyJzGEijREHZxEB7wm3bTd58lbPYIIdWpxYrfc1wzn8gbWUfw8/nXpH4t/wm6R07JbZd7nXfbBW/pxcGixU4m4Zo+ReVyLXDtwHn7Tc1V/gvi9yM5bWoj2h0XIl+jn9yhYG9ZLaREhZ2FadTztum4mLLJLuk1HAx7FUXSpBW7zuUx7Q7NU7XabbbublixSyoZAryAFgpb1LH8h5Pn0r0j3T5J6B8W60w7vO1+DkJiSS42I0iwtMsSmyQqAePJgEU8bBjU+755emLrHhO8RvD8M1Zv5HYyyZT4HMVQu9CcUOXotZTirm1lJyvxboyDSQr1smmrVwQyizhdu1STERJ1H
3s+V1bDl18iaH9xkbWGZRKrpwKLZ7kD/xYgG5JIFvStH6P547Lgdwwsn5WOn03x9stdPJgT4+SMmPJmL45SOVxdllgjZ0ICqrOW8NYWSXlF5vsfGflzTkpRhXmFMzJaUMFX2atDHuAryijk7lsERaSlOjGy6Ez3QLM+86XZBT6w26DB09G2M2o3kYdVGPkOIXLD9Pm/hvob+o+vi/0qS/yo6br/kX4sy2xXnfbajGOyxo4Wv7otY84fV0MfLjJb7Dy4m/IH2hT2OJeWyzRslI5Iu0JE0uAssFIYwjV2ZKRc3M/wBJW83Zm6qB3ispAdH+iCmcoFH0/Dvv2XEd86LLEsipGrAxi3B+XoW8Xuv0r8icLsGLi9XzevPr8ObJzJ4ZFzHDHIx1i9Y4SDxCS3++4N/p+XGI8R1fCtXeVGovrTIRb60WO2rOLfZJC0ypZO0SBpKRQRkpIx3CUYiufZu3AehEnYG/aI84ODDr4TBAXKF2b7mLG7G58n6fgPpTtPadl2/ZJtdqmMmSmNFABBEsKcIUCISiAAuQPub1Y+ayMn5dx9hqIg57I86Nfi7JboCjQzksdJSYu7PZ3ItISOFKLau1kCu1yiArHKVJMA3OYA1zmZ2LgIsuW3GN5FQGxN2bwB4va/4+lfHXOrb3tuVNhdfg9/Kx8SXJkHNE4wwjlI93ZQeI/lF2P0FcUJDMCM1kz40HVHcQKl0VNiVOnt5JGSaUEGiZU293O+EyLiyg/AwmO3+xCT0fBsxv3wkl/eGMxe5/S4XB4f8A4r/zX/DxTeP1R8PXf8bXNXNGGP33vlCjZNzc4/HyIeNrB/uv/rWLW8sw9nyjkbFDSu3VhMY0j61IylhmK27j6ZOJWhudy0RqliVHws65YFIJXZE9hRP2DvsOvmHOjmzZcELIJIVUlitkPIXsrfzEfW3pXZsOr5eu63gdnkyMOTD2MkyJFHKrZEZhNmM8Q8xhv5Cf1DzTqbegdw2+ENw3/b23321nVWv4+lca4pRp6eaVF3IEVVcm5gZ4DyPxZb5BxU6qbTMjjLtogarM4yb5Jrk37Ogqw8iniakm4vzNqPjGrwxDAkiOwG9O0XkLFk5YwMnGEmLxEnuEKU5g2AP15D1B/Cr1qMjO0fXW7Zpd6cPfLkHDGJE8qZJxpE5PIHUhRAx+xkBuW+lK7MNFiMk47tmBKjld9gm22yvHWh5rFUhXYPI9UYEkUFnFjq8CYAMk3UXIKKrgG3dD3pg6wOO+u7MgTJx2wIZTBK6+ChUOov5Kj/0va1R/XdrkabcY/bNjgLtdfjzWdMpZHxpW4kCOWT0Jt5C8r+B4tTxQ0erEw8REryL2XXi4qNjVpeSMQ8jKrMGaLVWTkDpETSM/fnSFVYSFKUVDjsAB2azEUqgUkkgAXPqbeLn8z9areTMuRkyZCIsaPIzBFvxQMSeKg+Qq3sLm9gK2hTmIO5BEo7CG5REB2ENhDcB9Ahr6/wC9dJAPr5FUX+b55uDvgFL4vwritpTVs3ZTZpWOSs+TYW2SWP8AFePHcm8hGF1kGlXSVfWZ6rMRboho5BNZQrZEygl6zJFPSO3dsOheLCxQn72QXJcMVRSbcvt8nyDcC/j87V6j/wAef8fU+WcfO7LvWyR1jBYxrFjPEs+TkKodoVMpCxgIyESMVBZgL2DEN55ffnjL87eYdf4wV/DkRB1ZPFtvsE/lBaek1JOyW6isW53crVq4dkxQh6bZVjCu2avSnkGyShSqGAxRLrH0HdjvNsutSFVi9piXubllHqo8WU+tj5FS3y5/jAvxX8dy93y9jJLnnOiSPGCLxjimaypLJyYvNGPDOhEbEXAsQa9COr/Xkb+NGlKNKUaUph8b8dqJi3LGc8yVyUvLy18g5auTN4YWK2vpuqxbqrxp4uOTpNfcEK2qzZZucRcESMbvT7DuAAABg4+vgxcqfMjMhlnILAsSosLDiPp+dWrc9w2u90Gr63mJirgahJEgaOIJKwlYMxmkBvIQR9pNrfxJNfd35M4Lx1mPF3H24ZCi4rM2ZiPFsdUAG8g9m51mxTeHcSahGTVdvFxXUwWTI4dHSSOqmYgCJg0m2WFj5kWvmkAzJv0L5ubX8+ngeD6/hauNZ0rtG467ndt1+JJJ1zXWGRPdVRCbWUciCzfcpKqCQCCfWn11nVVqNKUaUo0pR+z/AM/+DSlJer1o9dYmavZqRs7vx8g8Sl5sqCsi3RfLd6EeisQm6bNqA9JAAQ7PTqu9e0EmjxDj5eXPsMn35HWafiZVWQ39pWAuET0AuPFWDsG+Xd5YyMTFgwMb2Y0MMBYRs0Yt7jKTYu/qTY+aVGrFVfo0pRpSvgiSSYnMmkkmZU3WqZNMhBVPtt1qiUAFQ+3wjuOvhI44yWjVVZjc2AFz+Jt6n8zX28kkgAdmYKLC5JsPwF/QfkPFYLGHiYxeQdR0ayYOZVwDyUXaN00VZF2ACUHDw5AAy6wAI+sbce3WJh6vW6+WbIwceKGfJk5zMihTK/pycgfc35nzWVlbPY50MOPmzyzQY8fCJXYsI09eKD+VfyFZLx40j2jl+/cpM2TNBRy7dLnBNBugkUTqrLHNsBE0yhuI/AGu7KysbBxpMzMdYsSJCzuxsqqouWY/QAeprpxsbJzchMPDRpcqVgqIouzMfAUD6knwBXyxfM5Nm1kY50i9YPUSOWbtscFG7luqHUmsioXsOmcO0B0w8zF2GLHnYMiS4cqBkdTdWU+jKfqDXOXiZOBlSYWbG0WXE5V0YWZWHqGH0I+tdTOVjJBd+2Yv2rxxFOCtJNBusRRVg6MTvCoOiFERSVFPtAB7dtdWLscDNmmgw5opZsZ+EqqwYxuRcK4H6SR5sa7MrXZ+FDDkZcMkcGQheJmUgSKDbkhPqL/UUh3jmXtE2RvWpJ7Whp1kbI2U8lAlOlZ4o7cq6kdEvVwE3hjmHtWT9BtVDKyNn2HbCDQzy686vPVcoy44Iy4ioYxQyG54H6upHmrbiQa3r+qM+9gizxs8FmxRFkEHFlDFRLNGLfcPojDyKccfSO3YGr5VG9PFJqxFtxlYH3VVhEkSzCA2T2wRY6ikFt9nJFiluBJATfUib1dtQG8XszSYZ642KsIylOV7wYk4/wDMIeP/ALl/QnxU7pW60seZ/wAhXKaU4rDG9kqAMj+Uy39YwPUDzWVYq+xs0d7LkFXyLbxbN6B494qwc96xXBwiUXCI9YpGOXY5fQYOzWRu9Lib/B/t+a0ywe6j3jdo2vGwZRyXzxJ/UPQjxXRpdzl6LN/uGEsLT+06WkRZFtIvFvtbxyAP2n1B81hwtqaTkzZoVuwl2q9WeN2Tx0/ZHbMX6jlIypFYlwYwg9QIBdjGDbYRDWNqex4232mw1MMGTHNrpVjd5IyschYXBhb/ANxQBYkWsaytr13J1OswNrNNjSQ7CNnRI5A0kYUgETL/ACMb3A/ClPqwVX6NKUn560QdZGJ9tOzNPbcs3hIzpbuHHiJJ0BhQbj3CagIgYCCPWfpKHwjqG3PYdT1/9t/dpfa/d5KwRWVm5Sv+lftB43t+o2A+pqZ0+g
2u+/c/2qISftMZp5fuVeMS/qb7iL2v6C7H6Cts9KwIkZeSBiVBr9kMu/BuCDYQEA70VXP2NHYR+q3D9vUjljDWP3M4RezH55SceK2+t28L/HxUfiHMaQQ4JlM0njjHy5N+XFfLfw80noOOsSE3ZZOSsyMzX5dVk4rESi0RSLBNSomBwQHqW/tArw5gOU247B6NQWnwd3Dt8/YZ+wXK0uS0bYkKoo/boF+4e4v+4HJvc3t9Kmdtm6WbVYGBg4DYu5xldcqVnY/uGv8AaeB/2+ABBFh59aUq66DZIy7ldBsiTbrWcKpoIk37A6lVTEIXcdttx7dWCaaHHjM2Q6JCPVmYKo/C7MQBUDFDNPIIoEeSU+iqCzH+AAJNLWsW9sybIIgRFxHKAooV2zVIt3plD7gsChTikqmAF6fVNrmKWKeMTQOrwsLhlIZSPxBBIP8ApXEkUsMhinRklXwVYFWB/AggEf6ineTUIqQiqZgMmoQpyGDtAxTABimAfoCA67K+K+9KUaUo0pWHI/7vff7G5/iT6UqF/ltf2DeKX4mal/JTaUpgvI7/AFTvCD8UR/6XWfSlWs6Uo0pRpSjSlGlKgzFfrKrz/cfxf/XvlnSlTm0pRpSjSlJu0ySUfFrEMO6zwhm6BAAe0TBscwiAhsUpB/x6Uqqrk15j3EziPZpGi5lvExH39lUmNza0qDqkzMS01EyirxCLRi3pEUYLx8gtHqlImu6RKTp3OYpe3Vc3Ha9Jo5jjbCRhlBA/AKSSDe1j+m5sfqPzrdHx1/j98o/KWuj3XUcKKTRvlNjtkSTRxpG6BS5dSTJxUOpJVGvewBPitX5eXN1DnFiSxZEfwtWpE3GZHt1ZjKZGWMkjNr1CFGOVhbNKRjpb2myWfoyAFW6SGbFVKJSHENdfVuwjseC+WypHIsrKEDXJQWsxHqL3/hcVlfPPw5J8M9px9BBNk5mHJr4JnneIpGJ5OQkiRwODBSn23IcqblRUw4rKeO7He7bi6GvFcmcjUKPipK60hpJourHV4ydJ1QzubjAEVWbWUTHdETdhwHU8mZiy5L4SSI2XEAXQG7KD6Ej6X+lanyetb/A0mL2XLw8iHr+dI6Y+QyERTPH/ALixv6MyfzW9KWrdu3aJEbtGzdo3T3BNs1bpNW6YGETGBNBAiaSYCYREdgDcR1kBVUcQLL+VQ7ySSsZJWZ5D6liST/Em5NMpirD8/je7Zst0vmPIeSmOW7m2tkFULk5SXr+IGLdB0gapY/TIuqZtAODOAOcpikHqTL2aj8LAlxMjInkyJZlnkDKrfpiH/in/AOGrh2btWD2DT6fV4upwNfNqsQwSTwAiXOYkH38okAGUWIFifBPmsnL2ULnjabxLF1fC14yuzyLfmtQtMxVFmrdnimBcN++PfrYV0iqdevtj+oYqYkNuH1XwDznZeTiSQJDjyTrLKFYra0Qt+tvyFfHVet6nsGJtMnZbjD1kuvwWnhjnDFs2QG37aDiQBKR5BNx+VPE8ametXrBN2q0O9au2Kb9AelZkd0go2TfICPoWaGUBVP6BihrPZeQKg2JBF/w/OqnDL7MiTsocIwYqfRrEEqfya3E/lVbHBC8w8RQJnD2W8j5btF4e5tytTachyvcQ5shZNj6as2WfPaZFKFV9tUpo3HvUAHvu7Dq9YQANql1rIjjxmwM6ad8g5EiL+5tzkC2uVH1T6j1tXoP5r0uXlbyLtfV9fqsbTJpsLIyDpRJ+1w3nDBVyHFvbyGP2t+m/jxe5Mo85Y1s+VlqtjqRiMcznG2xRtljuQNZtJZltY5OHRZt3NURpjmIVbNmKbWTbdboVDk2SIHSPZtqa2OJNnFMR1ibUsGEytcMRb7eFvQX9fTxWt+mdh13WEyd/jy7CH5Bx5IX1k0PtmJHLETHIDhixKGyWB+4m9dFutr6f4wXGxcQn9LvcrF0SYhcTmZulJ2pSMpWG5oYldScxzoh3DpukxUaJFBYpiOik6zBsI6+Z53k07y6MxyusZEdvuUlfHHwfysPz9a+9Vq4cH5HxMD5VTMwsWXNjkzeS+3OqTH3DKVceFPIOx4kFCbCqgpfjU+x75ZOJ7Tyu465N5C3rE7i+XOYwRFZBs7NFpF5BkJCUfTWSRcLPHb9DHUY3I5X6DiowADdwcBEdUaTUti9Qgm3WLNlZMHNzCHYeHJJL39fbHk/h9DXqvF+QoN7/AJGbTW/GO/12h0m0XGx49k+LCxZ8VVRY8SwCqctzwW4tJ45r4FS24Nc2r5lex0XEl848HxbULth2CyBxztlCezF0oExjSvxy0U+a22yyINxrM2zdRhWzBismLlwQgmPsHSc831zsGTnTRYOTi+zBJjh4GQl0KKLEMx/SwtYD1NvNat+Z/h7SdY1+b2nSb4bPa4e2kxdrBkrHj5UeXI4dWghW/vRsr85ZFPBSQF+oCvzBjLN+fuT+RccOIyyYGxzVcYY6msXctseV5ozyW7nD2NKSveK4+6PHIHUrU03L3b9iQpAAhNzdXVsPfnYmx2e3lxSrY2IkMZjyUW0nItd4w5/lP1FRXVOxdN6P8cYHYEkx932DJ2WVHmaTKlZsRY/aKY2Y2Oo/3Yz5jkJJubC1qh/5j/k5UrOrWTzBxsgW1TzaLWJYPKJBlgKvRb6/WkyhJ3GwLOCERiptBkqdRcWhCi+OUBNufcRgu19Cx9kDn6lQmwsAUHFUc38s1/QgetvWtr/4/f5Z7fpUkfVPkGdsrpvJ2XIk92bJxlCHhBEAbvGWAC8yfbHgWXwPLHn7i9nzi9ZvdTOmM7HQ36pto2RetvFVqdJuoBVoCzMhXhZdM4on2BJYT+obcvqjrTOz02z08vs7KF4m+hI+0/wYeD/1r9LOjfJPR/knXf3Tpexx82ED70U8Zo/TxLC1pEPkfqW3kefIqWnla8J6vzazpZazfZ2yV+jYypI5EnDVpqQHk+u2mo2Pj6wEw4IdtBnfmdHWFXpOsKSB+7KOwiE303r0HYdk8WSzrjQx8zxHliCAFufS/r9TYGtXf5K/MOx+HelY+x0kGPPu9jmftYxMTxjBjdmm9sWMnDiF43C3Ycj9C2vNvnnnHmnaWKeR3rGIo9FeSsZS6BWxetq8xbNpJ6jHSsw2WeLt5q2t4wStTyQppKKJF26Sh2axOw9l2HYJgMohceMkIi3CgX8Ei9mYDxytVi+HPhLpnxBrHbr6PLuc1EfIyZeJlYlFLpGQoMcBe7iK7AE3uag1/i7BD0+gdu3YQDYRAdVytzm9q9tHk+c4cc8jMXJYOqmFkcRXDDVQiVLO1pEKijjKXaGVNHIzzN6js9YTM06Ioqo1eissc4KnKscobB6F6J2LF2uF/boMf2J8dByCD+mR6cgfUEm9w3n8zX47/wCV3w12HoHZD3PZ7c7XVbfLcQtkSE5iNbkYmU/a0cYIAePioHEFAT5s6zJHWOTx3kmvYqDHiOY7hSZxlU2t4KmnEzr8zPwHXaUo4Cz0nAtmjg6SxyAoKIGANw1b9gkr4ssWF7X7+SMhQ/oTa33fUj8fWvOPUsnX4++1+f2b9+ep4mZG0
5xyS8a8uX9Ev/SSUsAQDbkQfWvPBh3ytecOMqrh2rVCTxbS0JnKdnuWZiXGBomXIygW2KcpN6NljHrOfg1XbdueuNitU2jVwR4VQpFFdv8AJ1dr+m9iw4YIYDBGDMzS8wkoRh4SVOS3/T4sDf8AGvena/8AJT4a7Hs9tsdrHs8t4dZDj68wSZOC+TA4JycLKaKQKT7pLmR1KEEqv5+gmMQ5FJ56cDLyOOXvG1PFUW3aii3fI5UdZkTeNyy0i7ESDFpU50xKqdNMpxUKqYA22DfWzo12o2R5tEdT7A+h9wy38k/TgRf8714VyZOgt0lf2sewT5BOzcm5U4S4BU+2q/zmcNYEkWIB+tPnqSql10OGzV0UhHbZq7TTVTXTI6bouU010jAZFdMi5DlIskftIcAAxR7QENcFVb9QB/j5r7jkliJaJmQkWJBI8H1BII8H6j0NZG4/R/b/APPrmviuRMYQAomMJQ3EAEREAEfTsHoDfSuLC9/rTEjx7pA8ii8nfat0+MEuNTYrCH95nPuD7uGkPaQvRqXdeFGw+I7PGdfX0ertqO/teP8A3X+8cpP3Xs+3bl9nG978fTl+dXX/AJ3uB0H/APVz7WH/AGL+4/vfc9kfufd48OPv3v7VvPC1r+afTUjVLrkNtw37A37R+lpSmFxHb8/WO8Zzisv4nruPKLVLw1i8FWiFtqdif5SoyjAyrm0T8WQwmq79B6BUwbGAgiBh9UQKBz4OJLnvNOmXEscCvaJg1+a/iR9KtfYNf1TD1mryOvZ8uZtJ8UtmxPEY1xpri0aMR/UBFzyBI8A3BPER8i+E8tJ8gOR3IvI2VDzGTMi1OVxPx2vlQghq92414Vm416WQp8e6K7UhbdMo2F0SQbSTtqZwkdLpA/SYShHppXbPydhkS3yZVKRMo4tDGQbqPoxv5BIvVun+TMeDqWm6fpsAR6XDyFythBM/uw7HMRl4ysLc4kMYMbRowUg3tcXqRzObpnGnEFFj8xZpBzH1xrV6CvlnLs3HRkxdbRIKpxUUvOyJhQZL2ayPzgUpCdqig7Bv2jqQWSHW4aLmzfaoVPckNizHwLn05Mf+tUyTG2XdexZc3XNbaaZpZxiYiMyQxKC7hF8kRxr5JPoPWnu/Z+z9vWdVZpu8h4hxPlyJXgcqYxx/kiHdEbpuI+71GCsqCqTVcXLZERlGThUEUHAioUgGAgHER23EdY+RiYmWhjyo45EP0ZQf+4NTGn7F2Dr2QMrQ52XhZCkkNDK8ZBIsSODDyR4Jte1M7hHg7xE45WN5bcF8dsX43t8g5m1Bs9brLRCxNkbMZD2zExkwqCz6Lgn3hky+AbnTaEKUAKmAdmsTC0up10pmwcaKOU3+4Dz59QD9AfHj0qx9n+T/AJC7lhLru0bjOzdcip/SkkJjJiB4OyeA0i3P3sC5vckmtTb+cGB64ame6slP5oStmfUeNki5wjDjkJtjvJwpCs/Z5NVjnKQVKLhCiXxq6oHFHrD1R7R11y7rBj4e0WmDT+yfbHLg/wBQ9v0gfX8KyNf8YdqzBknPjh1jY+pOxUZr+wcjGvZTjBgfdZ/PADw1vUVL0wdJhKIgIlEQ3AdwHYdtwH4Q1L1rwefNcaUpnIuzZocZ2tlTlsaV9hgWOodfl6jllG2lc2Ww3949OlYak+pYNymjIyIZgCqbzvBBU3ZsO/q4aS5hzXieNf2IjBWTlcl7/cvG3gD6GrHPg9aj6rj7DHzZn7W+XIsuL7Vo48cLeOVZr/czt4KW8f8Ad4lRFJFVcxDiRJNVUekgmMYEkzKGImH+WoYC7AAdojrMJsL/AIVXQpZgni5IH/Wol4Es2B+WCVY5aQ+EZOv3+subvjGq23MGNGtVzDW4mGmHUXPMYlZ6LyVjavOOjqKo92sUjhNQREoCJgCKwZMHahdqkBWdSyK0iBZAAbEA+SFP0tV/7Xhdp6E0/QMnZpNqZ1hyZYsTIMuJIzoGjZwLK0qAAG48ECxIsalrqVrX9GlKNKUaUo0pRpXFxRpXP5fWjSlchv6Po6U/OkxVbUyt7B1IMWMswSaSj2KOjMMTMHKizA4JqropGMcVGiojumffYwar/W+x4nZsGTOxIsmGOPIkhKzxmNi0ZsWUG90P8rfWp/sXXsrrWZHhZcuNNJLjxzBoJBIoWQXCkj0cD9S/Q/Wux/WI6RsUFZnC0iSQryD9Biig9VRj1SSBO7XM+ZF+xuzkD6gTfUjr7zev4WdvMPsEzzjNwUkWNVkKxsJRZvcjHhyP5SfSuvD3+bg6TM0MSwnCzWjaRmQNIDGbr7bnygP8wHrW7ctGz9suyeN0XbR0iog5auEyrIOEFSiVRFZI4CVRNQo7CUQ2ENS+RjQZkD4mVGsuLIpV0YBlZT4KsD4II9QfWorHyJ8SdMrFd48mNgyup4srA3DKR5BB9CPIr5aNGka0bs2TdBixZIlRbNm6ZEGzVukGxE0ky7JpJJlDsANgANfOPjYuBjJiYiJFhxIFVVAVUUegAFgAB9PQVzkZGTnZL5WU7y5crlmZiWZ2PqST5JJ+vqax49tDkF0/iUY4BlVvEvnscDcwSLlMO575w4bCYrlYgF6eoREQ9GujBx9WpkzdasF8h+ckkXH+o48cmZf1kAWuSSPSu/Nn2bCPD2TT2x04xpJy/pqfPFVb9IJ82AAPrWy3H6I6kLmsGuNcUrrMskRRNI6qRFVuruUjKEKot0BufukxEDKdAdo7AOwa+GkjR1RmUSPfiCQC1vWw9Tb629K+1jldGkRWMa+pAJAv6XPoL/S/rXZr7r4rncR2AREQD0dvo/a0pYDz9TXAiAAIiIAAAIiI9gAAdoiI/AABoSB5PpT18D1rAjZSNmWhJCJftZJiodVNN2zWIu3OogoZJYhVSCJRMmoUSj9AdYeBscDa4ozdZNHkYbEgPGwZSVNmFx4uCCD+dZmfrs/V5Jw9lDJBlqASjqVYBhdSQfNiDcflWWomkoBRVSTV7s3eE7xMinQcoDsdMDlN0qB8AhsOsl0jcAyKrcTcXANiPqL+h/AjzWMjyISI2K8hY2JFwfobeo/EHxSRTGByXU3jWSin5oGZ8XHP4yYbOIx2um1c9CgHSKci6aSiqIGIYpg6i6rK/wBn791uXHzsaY6fK5xSRTK0TsEexuAQwBK3Ugi481ZHG36J2OLIwciEbfF4SxywssqKXW4sSCpIDWYEeD4riZNYK7H1mOpNdZyrNB/GxD5u6fi0LD1pFHuTvm51TCd0u0TTKUqYiJj/AA642p3Wjwtfg9SwYsnGSeKGRXk4CDFVeJdSTd2QBQFN7/ma51a6bdZufm9rzpcfJeGWaNkj5mbKZuQRgBZFckkt6D+Fbqx1yEtcS6gp9kSSiXYpmXaqmVSKoZBQFEjdaJ01CiQ4APYYNSu90Wp7JrpNPuoRkayQgshJAPE3XypBFiPoRUZpN5teu7GPb6aUwbKMHi4AJHIWYWYEeQSPINYEAWqV4W9DgVmDRWDjU3SNfScmUeMotZY3duTpKnO48OqsqOxzCO4m1h6Udb0vDp2meGKXEgDLjB7vHCx8
OQSW4lj+ok+T/CsvcHsW5D9w3CTSRZc5VskpZJJVUXUEALyCj0AHgVv47IyVdvELVVmk05Umot4/8Ui0OtDN2jIT9aLh6KoJtnomT3TJt6wD9PTJ7HiY3ZMbrDw5JzMqB5VkWMmBVjvdXkvZXNvtW3nxXGN13Kyeu5PZUlxhiY06RNG0gEzNJazJHa7IL/c1/FSBh5hKZRVXQRVSTSV7r7L07mHpAwiUC79gb6sP/aoCtxpSjSlYcj/u99/sbn+JPpSoX+W1/YN4pfiZqX8lNpSmC8jv9U7wg/FEf+l1n0pVrOlKNKUaUo0pRpSoMxX6yq8/3H8X/wBe+WdKVObSlGlKNKUkbPAOpozQzZVFMUAVA4LCYAED9Owl6SGHcNv8Q6UqtTlp5SfG3mZZ2d7y3Fz7K+sYdrXyW+mWx/CSTmFjiPRiot80dM5KIWaRrmQVVJ0t01THNsZQS7BqsbvqOl38wyc5HGUF480Yg2F7AjyLAm/pf863n8X/AORPyX8R619J1abFfSPMZfYyIRIgkbjzdWBSQMwUA3YrYeFB80kuGXk6YK4TXdTKGO7HerJkR5UZOmSM1a7A3Uh3MTLP2b50ZtXY+IbItHIDHIkKYVldgKIgACYddGg6Zq+u5BzMRpXyjGVJYi1ibn7beD4H1NSny5/kv375i0w63v4dfj6FMpMhI4Y2EgdFZVvKzksPvYkBV9R62qRk7jDItQ5PUqWxtx/xtJVLLMBNNuRHIYkxGwl9r6lUZGNj+FUixOlK3JlIOABIB6FStA7fVANS0iTwbeOTExYjBOh9+a4VxxH2C3q4P/pWvsPK1W1+OsvE7Bvs5NprJ4zq9ZweTFk95v8A5UnOxSBlH3W8e4fxp78iVnI0FQblNY7gIe7X2Krcs/ptQkJb2Ixs9jatFFYmDeTC4JIxbeRdlKmdcxilTA24iHp1IZTzx40kmKgkyQhKqTYM1vAJ+l/S9U/Q42pzN1iYm+yHxNJJkIs86IZGiiLAPIsYuXKrchfr6Uka1UM6XbBsY9sLWv4WznZsfGPIxzR0jd4bGmQZCNUKUqC5TqM7OxgJIxDfvgkcAXbcQHfXTE2dka4GULj7F4vIB5iNyPp9GAP/AFqUz4uq6juUkWA8m36bj5/2MwOPJl4yt5uLcoWkS48C63pFcU+HL3jNjFSlHyDYMi2Sw2KTvd8t1sl3kgaWvNhI3NYXNfZLpm93K2u5b9baOIY5GwGEAMO+sfS6oajC/bmR5pXcu7MSbu36iB/Kv4L9Kmfk3vz/ACL2MbhcLF1+vx8dMbGghQL7ePFf2hKw/wB2YA2eUgF7C4qSvuJKj/8AtDL69b/Nalq15Ue8212RrNqwvYmOBXWabU4uytQibZCNYc7zDMbPxTv2lc5GUlileRlbW8MRs6FpuoIKBuHoAYzYOIpoJVxjPNz4hgBeIEeWJPov0NqvHUsds/WbbAm3aanWDEEzwO0nHPaJ14QKifa8ouXTn48GmE491bzBsjZwsdz5FV2hYFwpXK06qMLhmvzkLkVXJE1IuXaxMjLW5kdWQgSxDMqbY0ep0EcApuBAEDDqM1jdkytk+RtFjx9eqcRECH5k/wA/L1FvTj6Grv3iH4U0XToNR0PIzN12/IyVnkz5Y5MUYsagA4ogNlk5td/dHlSLXPit9hbiNNFyDJZensdssA2mhN8kYjxbRqDffaOKJrHM1LrS8ZkqWo0W0SiG9ympF2ddQFNlkttjBuIbdmBrCMo57xftpow8UaI94yhNw5QeORJvWH27vKtoo+q4ew/vmtzWxM3MyMnH45qZUaCN8SPIclzAiKEFvB9Qai/gDiFy4x9lLJFPmMr3WVvdptNOyzmzkba63Fy+E8z15UJ2KJhzGdBcqiOP5iIjXaYyqyRBRcCQDAUodIGh9Zqtxi5UsDzStlO6yTTsoMUq/cPajQn7CAfuPobVsbvHffjffddwNliazBi0mJiz4Wv1cM0kefgS/wBN/wB9l5QH/wApHdT7Kt5QEi5NyJJcT8HeYDRLDfKVyfydg/JWHmkU9QxfZKFXHlPv7h89lk1CJWOHYxrCuRcTGwSyyDdJqU5yqkIYTm7R1K6TH7JjzSwbibHmweJ9tkXg5JP8wACgAEgAf9aoPyfufhbdYOFt/jjX7bXdraVTmRZMwnxgqoQTE7M0ru8gVmLkAqSABWPjLgHaOKQCHFu7P50MhZNq0rmIOQ96td/BrQI0ZQZdvjjuWzcIexqhJD3Yq9SSnSXrN6oa4w9FJpf/APTuze7Mpl992f7Be/t/g3n+HpXZ2L5Wxfk23/6ycWGD9hrpkwf7XjQ418l+HtnLuT7kQ4eePkXNh5qw9pQ24Im8Y5VFcRP3YtzACZQ3+xibqLuc23p9Aas1aPHp59aiTzH4rz3ITBNixbGV3E13dy83WnKURlxGwGqybJlMtVZd+3dVpRtPxNgQiQVBm4arJnKqbYxugRDUPvte201j4aRwyFmXxLfjYHybrZg1vQgg1sn4n7jD0Pu0HZMjK2WHDFDMOeCYxMWaNhGpWUGJ4i9vcV1YEC4FwKjXkbi3FeXtxbzROcCeNy89lq5owcazqtFXnbJOvrFJCWvsLOueyO5p44hKMWRVf+EAe5ECmExfWOIxOTroesaeeTrWITmyBQFW7EsftDHkT4S5NvT/ANav+l7nsvnP5G1GJ829hSPrGG0jNLOI4o1iX+q0QESxqJMgosfP9Q8AHwK8gt58srzIUL1PtLJxLzFNWR9NndzE3Wqwzmau+lJtUsg5espuuLK1szQ670RUM3OCCBgMT1egQDR2R1XtH7hhLhztMWuWVbqSfJ8r9v1+nj1/Cv1N1Hz58EnTQPr+yauHXxwhY45ZWSVUjHFVZJR7vIBfHIcmFj5uKnJxj8ivmIfPeLEOSOB2nxEykgZPJT6MyvSUpCBhpCJfpA8TQiJ9eacSUO+MioCKCKhTKAACJigcNWHUfH+6/uUP91xx/bmP32kW4BB/A3uDbwBWn/kT/Lz4zPSdkfj/AHLL3ONL4ofDnKyOrqeJLxiMI68lLMQQLnwbVfRV/Ir4vY0SSkMS3PNNEtsa0inEbOMMlSgNZG01l29laxP2+Jas2rOxEZyzkguGogk3Xbpd2BCCc5jbHh6DpsMc8F8iKYAWPuHyym6swt91j6j0I8V4t2f+W3yZ2FzB2iDTZuqkdw8bYaclhmCpNFA5Zmi5IDxf7mVjyJIAAb3LXA3mVmGuYfaZRSxxdeR2Ma3kWXq/LOo3ydxw0gbfFWEspjOt2ihRjAvvFXLaQSqTCaDMUUjJEIAiUD95jZuj3mdFAM32pNrCrlclXMfFg141ZAPuVvV7Cw8f6zPWPlP4t6nsdrJ1ls3D+P8AZT4qTaWfGjyzJA8XDLlhyXb+lLD5EBZ+RDEnzbjaZgXEuaK3hyhQ2echQeQsyMoNP3/tsNEkiIiWn11lnKwR7Rs2ZIkask1StyqAgj3wJd4JCibYLlrI82LAij2Uglzwv3sBYFvyHjwPT0F/W1ebu75vVtj2vOzOk4cmB1R5j+2gkcu8cYAA5MWYksQ
WI5Hje1za9Ot7iSv+sMvr1v8ANBrOqq0x1LofJsM0ZfNfpDFJ8BGZ1b4jm9dRmCZEQfA3N74jfl3KIRizc7nbwXhxESl+q7dRuP8A3T+4TnKMJ1tl9njf3L/zc7+LX9LVctuehnqGq/si7Ad4Dzf3EylDilb/AND9sAeYIX/c5+CfSkpyoheWdcp9GdcVapSrtcXmV6XG3uPtjti2ZxuJ3SzsLpOR5pSWg0TyzBMiXclTUVV9YelI4htrq3Em4jgjbTIkk5mUOG9BH/ORcjyPp/2qS+NsL45ztrmR/JeVlYmqXWTvjNAGLNmgL+3jbikh9tjy5EhR4F2FSePRJLrP3bhp0dRujqOrv0bj07gCQgA7al/H0rXQvYX9bea+fcOV/wBYZfXrf5rSuaPcOV/1hl9et/mtKUe4cr/rDL69b/NaUo9w5X/WGX16v+a0pR7hyno8Qy+vV/zOlKPcOV/1ll9er/mdKVG6S4tXu85UyC/zFbaDlLj1MRdAdY2wVY8fRjsmP7/UXZ30jdlrG7TXWnHUg7AijYh0y+DOUBIPZ2xrYUk+VI2ayS68heEZQHi6+rcvU3/9KukPZsHWaHDi67Bk4Pbo5Jxk50eQ6+/BKLLCIxYIFFw1j949RUkfcSVH/wDaGX16v+a1JVS6PcOV/wBYZfXrf5rSlfadHlUlE1QXYiKZyHABOrsIlMAgA/Yg7B20Bsb18sOSkfjUPuHvApHh/GZxjIDI8tei5xzzds7ya0/HRsSavSV2KxK6rMcWHRKV7FMfBAJFlw744mHfYNRGo1KalZ1SRpPfnaUkgCxa3gW+gt/GtjfIvyFk/Ik+rny8SHFOr1UOCojZ29xYeVpG5+jNy8geBapfe4cr/rDL69b/ADWpetd0e4cr/rDL69X/ADWlK59xJX/WGX+E6v8AmdKVFt5xryRSc7Zb5PN8v5MvsBJ4bQrcDxXbOkfcNjYaqgjJhP01FwuVNO625SOFr1qkSJ1uzdZxL0gWNbFmhzpdkJZZIzDYQ+OII+q//ia3/U+au8e/1uy6vgdKfAwMXLTYmR9mQfeaOUleEtluYog3K3I+EHFQbkuthle75XxXRsi23Hthw7ZLbBN5aXxhfVm6lwpTpU6qYws+eN79kL1IiQHHuzbAU4bgBtwDJxJ3ysZMiWN4pHW5Rv1L+Rt4qB7FqcTR7zK1GBmY+ww4JSqZMFxFMBY805eeJvbz+HjxTm+4cr/rDL69b/NayahqPcOV/wBYZfXrf5rSlHuHK/6wy+vW/wA1pSj3Dlf9YZfXrf5rSlHuHK/6wy+vW/zWlKTVuxpdpSBesarZY2tTqwoCzmVmppBNqVNch1ymaKJCRXvkQEvo9XffUD2bD3mx0suJ1zMXA3DFeE5QSBLMC32HweS3H5XvU71vN0mv3EWX2HEbO1KhucIcxl7ghfvHkcT5/OlAhQpkiKJFnjJVYiKRFlQFUoKrFTKVVQCAkPQChwE23wb6mohIsSJK3KUIAx9LtbybfS582qFlZGlZohxiLEqPWwv4F/rYeK7fcOV/1hl9et/mtdlfFHuHK/6wy+vW/wA1pStfL0yytouQcxScfJyiDNdSPj1nZ2qb12RMTN2p3J0wIgRZQAL1iOxd9YmxmzYcGabXxLPnrGxjjZuIdwPtUt/KCfBP0rL18WHLnQw7CRocFpFEkiryKIT9zBf5iB5t9aIml2R1Fx7mWSj42UXZoKyEem6O6TZPDpgZdsRyRISLkROIgBg7B21xr5cyfAhm2MSw57RqZI1bmqOR9yhv5gD4v9a52EWHDnTQ6+VpsFZGEcjLwLoD9rFf5SR5t9K1FsxJYLJGt2LK1OayqhJsJA7+GUMV0uizUMdWOVFRIA8I9A2ym3bsGovsmlyd9gph4mdk6+RMiOQyQW5sqEkxG/8AI/o38Kkuu7nG0Wc+ZlYWPnxtjyRiOa/BWcWEgt/Onqv5mu22UC6r16VRqy9d9urt+6YlmheDFj3pyEckdg3T7/oM1McA6e3q213djTdTaXJi69+3G3dLR+/yMXkgMH4/dYoWtb62rq66+mh3ONJ2D9wdQj3k9jiJfAJUpy+2/PiTf6XoqeIDVKvx0DEIxcczZpCcWjZd6q2TduTC4figdyCq4pKPFDmL1CIgAhr563odf1nSwabWRJBixLfghYqHb7pOJcs1i5Yi59DX12Le5/ZNzPuNlK8+TK1ubBQxRftjDBAFuECg2HqKUXuHK/6wy+vW/wA1qcqEo9w5X/WGX163+a0pWoe4lLIykRNPUo5eUgfFDEuzLOgOyF6mCLkSFIUqZxVTDb1gHb4NRmXptZnbDG2uVEH2GHz9lyTdPcHF7AGxuPHkH8qksXcbLC1+TqsWVk1+Zw95ABZ/bPJLki4sfPgi/wBa2/uJK/6wy+vW/wA1qTqNo9w5X/WGX163+a0pXA0OUEBAXDIQEBAQE6ogICGwgIdz2gIaWB8H0oDY3HqKxGGM1YpqRlGJREezTMocjVmQzduU6xxUVOVJNACgZRQwiI/CI6xcLBwtdjjE18UcOKCSERQqgk3JAHjyfJ/E1lZmdmbHIOXnyyTZTAAu7FmIAsASfwHgfgKzPcOV/wBYZfXrf5rWVWLXI0SVH0uWX163+a1yTc3PrT+FJqx0TIPTEe6jqtFMEy0Gb9sC9MUYHY/jQj+4S3CREenoE3q+nfUDvf8AkfHG/wCOHFDfuk/ce+GN8f8An9vj6SenG/ipzRnr18n/AJCMor+2f2PZKj/5Hjh7nL/2/XlbzSkGiSm47OGe2/ZuZQOz4PQl9DU6fWoIenn1rXlxcJJNaaI3hiy67VNitJlTMD5VkkYDptVHAId4ZAhw3AojsA6wl1uvTYNtlgiGzeIRtLxHuGMG4Qt6lQfIHoDWc2x2D4C6pppTrEkMixcj7YcixcL6BiPU+tbH3Fle0PEsth7dutbbcPQP716Q1m+bW+lYNgf4121XG6UFb5K6OXr5WUfQjeC8KnIuDQxGiDgrjvixZyFRI+MdMAFYBEwlEQ+HUEnX8KPsUnZlef8AfyYywFTIfZ4K3IER+ge48t6keKnH3+ZJ16PrTJB+xjyWnDe2Pe5svEgyepSx8L6A+adPU7UJRpSsOR/3e+/2Nz/En0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/S6z6Uq1nSlGlKNKUaUo0pUGYr9ZVef7j+L/wCvfLOlKnNpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKw5H/d77/Y3P8AEn0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP8A0us+lKtZ0pRpSjSlGlKNKVX3leq8m6Vy8dZ4wzhmkZiqdj441jEkkzn8ypYrl4KxVrJlxuAuSIuaJcEZaN
fRlnIUDFOidNVIQEBAQHSlb745eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlK5DMnPH4eEeM/2/8A8wI38iGlKbDDXL7mPnXGNSyzSeEFFTrFzZvXsSlMcuIdrJppMZZ/DrA8QbYZeN0zi6jlBKBFTh0CUREB3KClOd8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKU19U5g8x7jkvLOKYjhBRgs+GfcILco65bw6UYoORa8vZoIIlwTDCi7nuY9uYrjvEkehXYCdYetpSnQ+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKbC18wOY9OyTibFcxwgoo2bMx72nUTteW8OrGpnx5W07TOBLOD4ZTXbd7HKAVv3aa3Wr2H6C+tpSnP8Ajl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSmxzNy+5kYKxhcMtXbhBRVKvSWDeSl0oblxDu5RRBzJMotMjJu5wyzbqq+IfkEQOqmHSA9ojsAqU53xyc8fkR4zH/8A7AjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUrkMyc8REN+EeMw+mPMCO2D6Y7YQEdg0pTWYY5h8xs746iMn0jg/R0q3NStwh2ic1y3hmkiV3SLnYaJMisg2w08QKkebrTgyIlUN1oCQwgUwiUFKdL45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlK6HOYOeS7ZwgHCPGYCs3WSKI8wI3YDKJmIUR//AKIb7AYe36WlKdPhli244S4qYDxLkFKJb3igYzrVbtTeCkVJeGbzjFmBZBCNlFWjBSRZormEpFhQSFQA6ugu+2lKiB5Hf6p3hB+KI/8AS6z6Uq1nSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKipM8x8Nw3Kio8PhWs0nli2VGz3BR5EQZXlHqiFWims+tA2+zi9SJE2mXgXPjWTAiK6qjQgqqd0UyYnUpq+OPOXj3lK2UXEGM6JkWgwt8reQbRg6bnaLFVrHOVKxjefRirzJ49Wh5ySeNW0ZJyhFu7lGMUq4SW71IhwEdKU4ufeYtQ43yqxsiYuzifHkUlXF7fmyvUZjK4mo7ezzCMGwXscypY2c+qgzfOUvHHjox+VimoU6wlLuIKVLgpinKU5RAxTlAxTB6BKYNwEPpCA6Ur60pRpSjSlGlKNKUaUo0pRpSjSlGlKipy05j4c4YUiAvGXj2h42s9qhapB1+jwZLJany0tLxcS9myxJ30cRGsVb2ug4lnyipU2bc5R9ZQ6aZ1KavJfOPj1g3MmQ6pI0PIjuUr0thqCzxlqm0WJfUuhTOUxaw+IY/I88ecj7I+Wft5VAiQsI+TIxQcE70yQCIApT/5/wCRtO48xtLPPV+8Xq15KtpKPjjG+M4NpYb5d7IERJWB81hY6SlYKJRaQ8BDunr108etWrZuiInUARKUylKTBmbKPyGxlX8rY9Vlfd6eWmo9SPn408NZK9P1ickazaKvZ4VVRVWIsVbsUQ5ZPG4mOBFkR6THIJTCpTuaUo0pRpSjSlGlKNKUaUo0pRpSj9n7Ph0pSHyTkan4koloyTfpdGCqFPiXExNySpTqGTbobFSbtGyQGXfST9yoRu1bJAZVy4VIkmUxzlAVKhqhzSxxe8DYk5f1DjjnfJdTmIa+WKKWjqdjWOv+JIyCGVg7k4t7C8ZLq5ayo4JBOUFiMnbsx00hKoAB2aUp5IDlri6V4kM+aU6zuePsOr4fVzi+b3qu+zrtB0NKDVsYOJatRL6bAkivDplWSQQcL94VUnSYerSlYfHrlxR+Q9ku1JjaTlPGd7oMDRrfNUvLNai6/OLUzJLeXcUm2Rh4Gw2mIeRE2EC8TAviiO0FW5iLIpjtupUqtKUaUo0pRpSjSlGlKNKUaUo0pQI7aUo0pUDa1zSwbyKNyix9QMcZEzcbjo+qcLdKxH1GsOIvKPvUD5zGOMWr2y0Q8DdIdo/gHaKrpysxb+JZKd0ZUpSnFSnN408rK5yad5gi4fGeXMYzWD761xreIjK0LU45b3sXrcVal2UI/pl1vENLFjIucag7Erkh266oJmL1AOylavHPMzGuW8z2TDmOq5fbSWoTNmrViyawZVUuN4+x09RZnYogq7i3I3V6WNmW54475CEVi/aJDIA5E5TdKlS50pRpSjSlGlKNKUaUo0pRpSjSlGlKNKVFi18w8PVLk/jTiK5Vs0rlvJ0JZJxkMDCpyFUqiVerz60oR16sJnzdOvzdnhIt0vFtASWWcpNzqHBJMSHOpTcYy5b0tHPdV4jI8Z88YYsdkq2S8i1hewVDGDDHalXp1gZEtFjVdUbJtofwzefstqR8Id1HoGeu3g79J+vZSnB5T8ycN8QIqgSOVDWmQd5KvtVoFYr1Hg07HYTuLRZYOqmtEiwO/jk46lVqTsjIZSQUVArYjggFKoociZlKlbpSjSlGlKNKUaUo0pRpSjSlGlKqm8jv9
U7wg/FEf+l1n0pVrOlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKhhJ8IKNK2p7bVs2cvWzt9PHsCkRG8rc1R1YRcHeg+GPZ1xpakotnCAcOgGaaZUCo/YwL09mlKiMh5amU6Xy8wfnShcqcsSlFrN85K5LyJX7shimQfEsmZqzDRce0iHrfFiFjscO4WjUmTgsjKGXZRjFuk1VT2HSlNfxL4BchuPuZ6llFGi4iq1kx/jrPMNki9weULbYXPMO55AkSTdFeTtVla2lGYXh42xNwkXwsfEqprKiiiU7cALpSn05LY65tZwyZhiFsuDsVXXjDWK5Rr3kvGsVnxxT3t6z0yfsZRSItq8njaV94sO4ylGZX0exSFspPv00zPSkRSKiZSrYy/Uh2AXsD1Q9Bez0B2B2BpSudKUaUo0pRpSjSlGlKNKUaUo0pUZswcW6rmezNrTNZQ5G0101jEYskXifkBk3F9cVSRVVVK6cV+nz8ZFryRxVEDuDJiqcoFAR2KGlKrj5u+VBcs1Y4thMH8o88Q1/d42o2LIOFyTc61kKuva3A5ahcizS0rcck0S+5BYysks1M4WXaP0heKsGSKwCgkBAUpNci/Lqz3kzP1vvbFnQMhzljkuOrrFXJ243qXqWQOPLLECkMra0l8U1WspU3J0nZH7F8+bKqC2R76TMkqmVJJMAUqaPIPFnJO8XTD2bqVTsayF84xZ0vstQ6FLX2TiYjK+G75jJ7QJNxL2gKs9Ci39JxMndIJeEkGZCswIZQ3fj0KU63CzBtuwFhRSuZDfQj3I13yVlrM2QCVhZ24q0Va8v5CsF8kK9W3L9Fs9dRFcSmUmSa6qaZ3JkDLCUvX0gpUs9KUaUo0pRpSjSlGlKNKUaUo0pTRZjw3DZrrzGtzdwyrS2zCUTlU5LEmTbfiyeXVTRVRBo9nKZJxci8jTlVExm6hxSMcCmENwDSlRMu3l3V6yVqBr8DyX5ZVpxXsl1XKjKancuLZncjYaY2kkq81PFZ2ick10sQyfSPjgQIzKBnzdBcRE6KYgpUe6DwW5M0zy12HCuRymhcrte73KQGULtPSsSwJVcG3vLUlYckMaY4q1MgAl5qUx86cNkUXKBDA+llh78EkUy6UqZHJLFGasiYgypxzwqnQ8b0u1caLRRaNkN6+dOpKrZEWK0rddqq1JJCrsRoitPOuVaQI5M5bHApE25uwwKVH3y++IF5435Ay5bneMsf8AH3H14o+K6uwwvQcm2jMCEjd6GnPoTuVpK32yJinsWeciZNnGtotADpkQYAsqPfKGEVKtO0pRpSjSlGlKNKUaUo0pRpSjSlaKzwDe1V2brTt/NRbadjHkWvJVuYfV+fYpPUDoHdQ85GLN5GJkUSn6knCByKpHADFEBDSlQodeX/TDQtni4zkPzNYOrLUbNUAlJDlLlS2+yULRDuYR1LMIW4zU7XVplg2dmO0WctFwQXAFCl6gAQUpjeHnBHM3EXO3KK/oZrsWW6Dc8QYKoeFqrdSY5gzKyGJKlZIpBK1p0LGNSThW0S5eoM2SzYyvfNXLhZyRRYpDaUp/uLODctcY+O2NaWmzqF8zBbsmr5H5NWSQsEhGMH9ny1b5C35pt9cekh3TqdfQC8oLWFZuE2xHLRogkdREpQ2UqDeCfLXyRhzPlCdQ1VxNWabjTlbnPkkvyRgZ6RUzflel5haXhZDBVqrpoFv4eLZSt1TI/XWlXUco2iUFGzVNyc59KVeZpSjSlGlKNKUaUo0pRpSjSlGlKNKVDOx8I6PZrXL253mrl3HO5mYWml4ev8q80QFYaLLOPEGZRVci7U2i4uIIb1StUUyJET9UAANKVEWS8tTJdf5eYJz5j/lHlp7Sq9nnL+a8rVW4fFa+ci5v2NnFPYw1el/isPbJuKcIpNodRKQlVDsIcuzRRNUoG0pUr8e4QybDcj+VvJm3N6vK2u4VyoYp48Qac07UaQuJ6DCu51NnPSBosqkG+v8AlSdePZEiBHBUW6DX1lDJgAKVC3mz5cWcuWFfNlOu55ueJ88XCv8AGWGuWMIiXoNiw1WWuL8p1LJF0YUqxW3FEzc2hUZRm8fIqIKM0Zh+zaC7RBMBTKpVzka1XYxzBk5kHUs5ZsmrVxKviNE3smu3QTSVkHibBszYkdPFCCooCKKSQHMIEIUuxQUrN0pRpSjSlGlKNKUaUo0pRpSqb/JQyZjiC8qzhTFTmQKTDSjPEp03cbK2uBj37VT3usw9Dhm7fouED7fAYoDpSrSfjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF
# (header image "mapadf_entregas-loggi2.jpg": embedded base64 data omitted)
#
# # **In this project we carry out an Exploratory Data Analysis (EDA) to explore the relationship between the hubs (strategic locations used to redistribute goods) of the company Loggi and its delivery points in the Federal District (DF).**
# ---
# ## **Table of Contents**
# 1. Context
# 1.1. The Project
# 1.2. The Company
# 1.3. The Data
# 2. Packages and Libraries
# 3. Data Exploration
# 3.1. Data Collection
# 3.2. Wrangling
# 3.3. Geocoding
# 3.4. Quality
# 4. Visualization
# 5. Insight
# ---

# # **1. Context**
# ## **1.1. The Project**
# This project analyzes a dataset containing the deliveries of the company Loggi in the Brasília region.
# We will examine the relationship between the hubs' distance to their deliveries, as well as the capacity of the vehicles.
# After the analysis, we will discuss whether and how these relationships could be optimized.

# ## **1.2. The Company**
# Loggi is one of the largest private carriers in the country, a Brazilian logistics company that operates mainly in the e-commerce sector.

# ## **1.3. The Data**
# The data are synthesized from public sources (IBGE, IPEA, etc.) and are representative of the challenges the startup faces day to day, especially with respect to its scale.

# # **2. Packages and Libraries**
# * import json - encodes and decodes data in JSON format
# * import pandas as pd - manipulates and analyzes tabular data
# * import geopy - geocoding in Python
# * from geopy.geocoders import Nominatim - converts addresses into geographic coordinates and vice versa
# * from geopy.extra.rate_limiter import RateLimiter - limits the call rate of a function
# * import numpy as np - provides support for array operations and mathematical functions
# * import geopandas - adds geospatial functionality to pandas
# * import matplotlib.pyplot as plt - generates plots
# * import seaborn as sns - generates plots

# # **3. Data Exploration**
# ## **3.1. Data Collection**
# **Download the data as a JSON file:**
# **Load the file's contents into a Python object (a list of dictionaries):**
import json
import pandas as pd

with open("deliveries.json", mode="r", encoding="utf8") as file:
    data = json.load(file)

# **Getting to know the data:**

# The number of records
len(data)

# The field (column) names
nomecol = data[0]
print(nomecol.keys())

# ## **3.2. Wrangling**
# **Create a pandas dataframe to make inspection and manipulation easier:**
entregas_df = pd.DataFrame(data)
entregas_df.head()

# **Normalizing the origin column**
# The origin column holds nested data, so we normalize it:
# 1. In a new dataframe, turn each key of the origin column into a new column *(normalize)*
# 2. Join the new dataframe with the new columns back onto the main dataframe
# 3. Drop the origin column
# 4. Reorder the columns
hub_origem_df = pd.json_normalize(entregas_df["origin"])
entregas_df = pd.merge(
    left=entregas_df,
    right=hub_origem_df,
    how="inner",
    left_index=True,
    right_index=True,
)
entregas_df = entregas_df.drop("origin", axis=1)
entregas_df = entregas_df[
    ["name", "region", "lng", "lat", "vehicle_capacity", "deliveries"]
]
entregas_df.head()
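# Before flattening the deliveries column in the next step, it helps to see the shape of one
# raw record in `data`. The dictionary below is only a schematic sketch (every value is
# illustrative, not taken from the dataset); the keys follow the fields handled in this notebook:
example_record = {
    "name": "example-instance-df-0",  # hypothetical instance name
    "region": "df-0",
    "origin": {"lng": -48.0, "lat": -15.7},  # flattened into lng/lat above
    "vehicle_capacity": 180,  # illustrative value
    "deliveries": [
        {"id": "...", "size": 5, "point": {"lng": -48.1, "lat": -15.6}},
        # ... one dict per delivery point; the id field is not used in this analysis
    ],
}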
# # **Normalizing the deliveries column**
# The deliveries column also holds nested data, so we normalize it:
# 1. In a new dataframe, turn each element of the list stored in the column into its own row *(explode)*.
# 2. Build one dataframe per field and concatenate them, i.e. combine the three dataframes into a new one (the record's id field is not relevant for this project's analyses).
# 3. Drop the deliveries column.
# 4. Merge the resulting dataframe back into the main dataframe.
entregas_exploded_df = entregas_df[["deliveries"]].explode("deliveries")
entregas_normalized_df = pd.concat(
    [
        pd.DataFrame(
            entregas_exploded_df["deliveries"].apply(lambda record: record["size"])
        ).rename(columns={"deliveries": "delivery_size"}),
        pd.DataFrame(
            entregas_exploded_df["deliveries"].apply(
                lambda record: record["point"]["lng"]
            )
        ).rename(columns={"deliveries": "delivery_lng"}),
        pd.DataFrame(
            entregas_exploded_df["deliveries"].apply(
                lambda record: record["point"]["lat"]
            )
        ).rename(columns={"deliveries": "delivery_lat"}),
    ],
    axis=1,
)
entregas_df = entregas_df.drop("deliveries", axis=1)
entregas_df = pd.merge(
    left=entregas_df,
    right=entregas_normalized_df,
    how="right",
    left_index=True,
    right_index=True,
)
entregas_df.reset_index(inplace=True, drop=True)
entregas_df.head()

# **Let's check some information about our dataframe:**
entregas_df.info()

# **Check for missing data**
entregas_df.isna().any()

# **There is no missing data**

# ## **3.3. Geocoding**
# Geocoding is the process that turns a location described as text (an address) into its
# geographic coordinates (latitude/longitude). Reverse geocoding does the opposite, and it is
# the one we will use here. We will use Nominatim, a free geocoding service accessed through
# the geopy package.

# ### **3.3.1. Reverse geocoding of the hubs**
# We will process the hubs' geographic coordinates to obtain textual address information
# through reverse geocoding.
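# A minimal sketch of what a single reverse-geocoding call returns, before applying it to the
# hubs below. This is an illustration only, not part of the original pipeline: it assumes
# internet access and compliance with Nominatim's usage policy, and the coordinates and the
# underscored variable names are made up for the example.
from geopy.geocoders import Nominatim

_geolocator = Nominatim(user_agent="ebac_geocoder")
# Nominatim accepts a "lat, lng" string; .raw["address"] holds keys such as
# "suburb", "city" or "town", which is why pd.json_normalize is used further below.
_location = _geolocator.reverse("-15.80, -47.89")
print(_location.raw["address"])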
# To do that we extract the hubs' location data, apply reverse geocoding to each region's
# coordinates, and extract the city and suburb information.

# Extract the region, lat and lng columns into a new dataframe
hub_df = entregas_df[["region", "lat", "lng"]]
hub_df = hub_df.drop_duplicates().sort_values(by="region").reset_index(drop=True)
hub_df.head()

import geopy
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter

geolocator = Nominatim(user_agent="ebac_geocoder")
geocoder = RateLimiter(geolocator.reverse, min_delay_seconds=1)

# Create a "coordinates" column holding each hub's latitude and longitude as a string
# Create a "geodata" column that receives the "coordinates" column with the geocoder applied
hub_df["coordinates"] = hub_df["lat"].astype(str) + ", " + hub_df["lng"].astype(str)
hub_df["geodata"] = hub_df["coordinates"].apply(geocoder)
hub_df.head()

# Normalize the geodata column
hub_geodata_df = pd.json_normalize(hub_df["geodata"].apply(lambda data: data.raw))
hub_geodata_df.head()

import numpy as np

# Keep only the columns of interest that were generated
# Rename the columns
# Store in "hub_city" the value of "hub_city"; if it is missing, fall back to "hub_town"
# Store in "hub_suburb" the value of "hub_suburb"; if it is missing, fall back to "hub_city"
# Drop the "hub_town" column
hub_geodata_df = hub_geodata_df[["address.town", "address.suburb", "address.city"]]
hub_geodata_df.rename(
    columns={
        "address.town": "hub_town",
        "address.suburb": "hub_suburb",
        "address.city": "hub_city",
    },
    inplace=True,
)
hub_geodata_df["hub_city"] = np.where(
    hub_geodata_df["hub_city"].notna(),
    hub_geodata_df["hub_city"],
    hub_geodata_df["hub_town"],
)
hub_geodata_df["hub_suburb"] = np.where(
    hub_geodata_df["hub_suburb"].notna(),
    hub_geodata_df["hub_suburb"],
    hub_geodata_df["hub_city"],
)
hub_geodata_df = hub_geodata_df.drop("hub_town", axis=1)
hub_geodata_df.head()

# Merge "hub_geodata_df" (which contains cities and suburbs) with "hub_df" (which contains the regions)
# Keep only the columns: region, hub_suburb and hub_city
# Merge the main dataframe "entregas_df" with the new "hub_df"
# Reorder the columns
hub_df = pd.merge(left=hub_df, right=hub_geodata_df, left_index=True, right_index=True)
hub_df = hub_df[["region", "hub_suburb", "hub_city"]]
entregas_df = pd.merge(left=entregas_df, right=hub_df, how="inner", on="region")
entregas_df = entregas_df[
    [
        "name",
        "region",
        "lng",
        "lat",
        "hub_city",
        "hub_suburb",
        "vehicle_capacity",
        "delivery_size",
        "delivery_lng",
        "delivery_lat",
    ]
]
entregas_df.head()

# ### **3.3.2. Reverse geocoding of the deliveries**
# Since the deliveries amount to more than 600,000 locations, we will download a file that
# already contains the geocoded data, load it into a dataframe, and merge its "delivery_city"
# and "delivery_suburb" columns into our main dataframe.
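# The notebook reads "deliveries-geodata.csv" from disk in the next cell, but the download
# command itself was not preserved here. A minimal sketch of that step, assuming a hypothetical
# URL (GEODATA_URL is a placeholder; replace it with the actual source of the file):
import urllib.request

GEODATA_URL = "https://example.com/deliveries-geodata.csv"  # hypothetical URL, not the real source
urllib.request.urlretrieve(GEODATA_URL, "deliveries-geodata.csv")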
# Download the deliveries' geolocation data
# Load the downloaded file
deliveries_geodata_df = pd.read_csv("deliveries-geodata.csv")

# Merge with the main dataframe, keeping the "delivery_city" and "delivery_suburb" columns
deliveries_df = pd.merge(
    left=entregas_df,
    right=deliveries_geodata_df[["delivery_city", "delivery_suburb"]],
    how="inner",
    left_index=True,
    right_index=True,
)
deliveries_df.head()

# ## **3.4. Quality**
# **Let's inspect the data and check the quality of our material**

# Check the dataframe's information
deliveries_df.info()

# Check for null values
deliveries_df.isna().any()

# **Checking the deliveries by city and by suburb of Brasília**

# Percentage of null values in the delivery city column
100 * (deliveries_df["delivery_city"].isna().sum() / len(deliveries_df))

# Percentage of null values in the delivery suburb column
100 * (deliveries_df["delivery_suburb"].isna().sum() / len(deliveries_df))

# Deliveries per city
prop_df = deliveries_df[["delivery_city"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)

# Deliveries per suburb
prop_df = deliveries_df[["delivery_suburb"]].value_counts() / len(deliveries_df)
prop_df.sort_values(ascending=False).head(10)

# # **4. Visualization**
# **Installing and importing GeoPandas**
# GeoPandas adds geospatial functionality to pandas and will help us visualize the hub and
# delivery coordinates on the map of the Federal District, segmented by hub region.

# Install the geopandas package, then import it
import geopandas

# **Download the Federal District map data from the official IBGE website and build a dataframe**
mapa = geopandas.read_file("distrito-federal.shp")
mapa = mapa.loc[[0]]
mapa.head()

# **Create a dataframe for the hubs with geolocation information**
hub_df = (
    deliveries_df[["region", "lng", "lat"]].drop_duplicates().reset_index(drop=True)
)
geo_hub_df = geopandas.GeoDataFrame(
    hub_df, geometry=geopandas.points_from_xy(hub_df["lng"], hub_df["lat"])
)
geo_hub_df.head()

# **Create a dataframe for the deliveries with geolocation information**
geo_deliveries_df = geopandas.GeoDataFrame(
    deliveries_df,
    geometry=geopandas.points_from_xy(
        deliveries_df["delivery_lng"], deliveries_df["delivery_lat"]
    ),
)
geo_deliveries_df.head()

# ## **4.1. Map of the hubs and the deliveries**
import matplotlib.pyplot as plt

# create the empty plot
fig, ax = plt.subplots(figsize=(25 / 2.54, 25 / 2.54))

# plot the Federal District map
mapa.plot(ax=ax, alpha=0.4, color="lightgrey")

# plot the deliveries
geo_deliveries_df.query("region == 'df-0'").plot(
    ax=ax, markersize=1, color="sandybrown", label="df-0"
)
geo_deliveries_df.query("region == 'df-1'").plot(
    ax=ax, markersize=1, color="darkred", label="df-1"
)
geo_deliveries_df.query("region == 'df-2'").plot(
    ax=ax, markersize=1, color="firebrick", label="df-2"
)

# plot the hubs
geo_hub_df.plot(ax=ax, markersize=30, marker="x", color="black", label="hub")

# title and legend
plt.title("Entregas no Distrito Federal por Região", fontdict={"fontsize": 14})
lgnd = plt.legend(prop={"size": 14})
# note: newer Matplotlib versions (>= 3.7) expose this attribute as lgnd.legend_handles
for handle in lgnd.legendHandles:
    handle.set_sizes([50])

# Looking at the map, the three hubs appear to be well placed relative to their delivery routes.
# The hub of the central region (df-1) has most of its deliveries concentrated around it. In
# region df-2 the deliveries start to spread farther from the hub, and in region df-0, perhaps
# because of the lower population density, the delivery points are the most spread out and lie
# farthest from that region's hub.
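# The reading of the map above is qualitative. As a complementary sketch (not part of the
# original analysis), the hub-to-delivery distances mentioned in section 1.1 could be
# quantified with geopy's geodesic distance; the column names are the ones in deliveries_df.
from geopy.distance import geodesic

# Geodesic (straight-line) distance in km between each delivery point and its hub.
# A row-wise apply is slow for ~600,000 deliveries; shown here only as an illustration.
deliveries_df["hub_distance_km"] = deliveries_df.apply(
    lambda row: geodesic(
        (row["lat"], row["lng"]), (row["delivery_lat"], row["delivery_lng"])
    ).km,
    axis=1,
)
deliveries_df.groupby("region")["hub_distance_km"].describe()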
# ## **4.2. Chart of deliveries per region**
# We will build a bar chart with the percentage of deliveries per region. First we extract the
# columns of interest into a new dataframe, count how many times each region appears and, using
# the normalize option, turn the counts into relative proportions (percentages).

# Extract the region and vehicle-capacity columns.
# Count the occurrences of each unique ("region", "vehicle_capacity") combination and normalize
# the result so it represents proportions instead of absolute counts.
# (Note: this reuses the name `data`, which previously held the raw JSON records.)
data = pd.DataFrame(
    deliveries_df[["region", "vehicle_capacity"]].value_counts(normalize=True)
).reset_index()

# Rename the new proportion column (named 0 here) to "region_percent".
# (In newer pandas releases the column produced by value_counts(normalize=True) may already be
# named "proportion" instead of 0.)
data.rename(columns={0: "region_percent"}, inplace=True)
data.head()

# Plot the chart with seaborn
import seaborn as sns

with sns.axes_style("whitegrid"):
    grafico = sns.barplot(
        data=data, x="region", y="region_percent", errorbar=None, palette="rocket"
    )
    grafico.set(
        title="Proporção de entregas por região", xlabel="Região", ylabel="Proporção"
    )
# # **Exploratory Data Analysis of Logistics Data | Python**
# (header image "mapadf_entregas-loggi2.jpg": embedded base64 map of Loggi deliveries in the Federal District, data omitted)
9v9Xjjyty/L0pEzuKKy+lH1thmbaHyAsZV0xtZvFuzMpRVsVqL4Y47nwi+zcBL0iTbt3231U9x8b6HM2E3ZdVFHi90Yl48w83McpUJ7ntl+Dfb4sVt9bXq1an5G3uHgQ9c2ksmV01AEkxPsT3Ig3P2/cCc1+7zcNf6XtXUzZUWkL0CqzbgZKzOnUmFTfy7dxJyKkqcviZdZm/UIr7NIr1j0lExAAg9ICO2vjFxOodRm0vXNq/7jfSSS/s5JlaWUzEcp2SQhvaBv4BIsLKCbV2ZWX27tkW57Fqk9jQxxxfvI4WWKMQg8YVeMFfdIt5IBJN2I8064gIekBDf6OtjkH61rutLIWGHipKFiH7sEJCwruG0QgKSxxdrNku+XJ1pkMml3aXbucSgOonN3es12dia3Ml4Zuc7JCtmPNlHJhcAhbD/wAiL/SpTD0uz2GDl7LDiL4WEitM1wOCseKmxN2ufH2g001izK7ho+8PEKHZFgq78kTEOVmqwM598KTlRw4HuyFVZRbErYRUWHqASmLt6da13nynk6rB2+XDp89110whhZkbhkvZi7eBdIowt2kNxYqR61sfS/F+NtM3U4su3wUOwhM0yh1540d1Cr5JDyycgFTwQQ1/SogOuWOVnKDhJE1aZd+UxUnDaIEzhsBh3KdE67lVIyhA9AmIYB+ENeYcn/JL5IyYnji/YRcwbMkH3Lf6qWci4+hKkflXpjH/AMcfjyCVJJP30vAglXm+1rfRgqA2P4Aj+NIh3nzLbwZEDXSRRJJkImuRsmzQ7kqZAIAsTkQ7yPMbbcRRMQTCO46qOT8zfJ2T7/LazouQAGCBF4gC39Mhbxn6koQSfWrZjfDnxvi+xx1ULtjklS5dr3N/6l2tIPoA4IApVU7krkOpsZBm9chaPFd2tHOJ4yjhw1XKoiRQTOO8BRVmdsmcAT9IKmA2/YIasfVvnzvHW8OfFy5P7iZADE+QS7IwKg3a9yhUMOP0c8r2FqrnZfgjpXZMuHJxY/7eY7iVccBVdbMRZbWDhiDy9OA42ubh5EeZseVul4qgvzOiok8SLaabA3MuBA70UAVamUKiJ9+kDCIgHp1tSH/KrCEKnJ0sxyAo58Z1C8rfdxulwt/S/m1awl/xczTM37fcwiDkeIaBuXG/jlZ7cretha/pUrmUpZ3Vm8MeAbJU5WvM5FtP+PKd8eZc9B1IpRgAgJUUUT797tsIhr0fibHsGTv/ANu2HGvWGwUlTJ9y8hnexMJj/wDFVN+f1rztla/QY+i/cLmSN2Zc142x/btGIVuBKJPxZhbj9BSmcuWzNIzh24btG5OnrXdLJN0CdQgBetZYxEy9Rh2DcQ3HU/PkQY0RnyZEigX1ZmCqPoLliAL/AJ1AQwT5MogxkeSY+iopZjb1sFBJ/E/hSeTkLMa3KR5oRp7nexEniFjK/KZ4tMHV2PG+zwERBsCHrArtsP0dQsebv37M2EcSL/i37QOuUJBzM5NjF7f/AIhfu5+n51Nvh6BeuDMGXL/yf92UbGMZ4LCFuJPc+rcvHD6fhSnH6WrBVfo0pWmsM43rkO8mnbaQeN2JUzHbRbQ75+oCipEgBu1TEDKiUT7j9AA31F7rbwaLWS7XJjnlghAJSJDJIbkD7UHk+T5t9KlNLqZt5s4tVjSQRTzEgNK4jjFgT9znwPTx+J8VtklAVSSVKBylVTTVKVQokOUFCAcCnIPaU4APaA9oDqRjcSRrIAQGUGxFiLi9iPofPkfSo6RDHI0ZsSrEXBuDY2uD9R+B+tI7IF5h8d1h5ZZszgG6R02bZNq3F0svIOiqA0SBLrTKJRMmJjCJih0lHt32Aav3Xt+r6P1+XfbUv7CsEUIvJmke/BbXHi4ubkeAfN7VZ+mdS2fdt/FotUE95gXYu3BVjS3M3sfNjYWB8keLXruqkpLHp0dNXJ1Cpv1I80nIvIox0odJkoBnKCpTLj1JgmzEveCI7AcB7dtdnW9hs36vBtu0yYi5hhMsrw3EKofuU3b04xkcyTYEGursWBrV7PPqurx5TYYnEUaTWMxcfaykL63e/C3ki3ilM0eNJBq3fMXCLpm7RTcNXTdQFUHCCgdSayShREpyHL2gIasGNk4+bjpl4jrLiyqGR1N1ZT6FT9QfxqAycbIw8h8TLRo8qNirqwsysPBBB8gj6iu1UgqJKpgcyYqJKJgoT6tMVCGIChN+zrTEdw+mGuyRS8bICQWUi49RcWuPzHqK+I3EciyEBgrA2PobG9j+R9DSeqkE4rEC3iX1glLKs1UdqqTU2ch365F1lHAEXMUwl6GxDdBe3sIXUJ1vTz9f0qa3MzcnPliLsZ8ggyMGYtZiPogPEf8A4QKmuxbeDsG4fZYmFj4MUgRRBACI1KqFuAfN2Iu3j1NbaNlI6YZpyES+ayLBYVCovGapV26pkjimqBFCCJTCmoUQH6A6ksDYYO0xVzdbNHPhvcK6EMpKmxsR4Nj4P51G52vzdZkthbGKSDMQDkjgqwuLi4PnyPI/Ks7WZWJRpSt/j1coPbO26FBOeRaqgcCCKQAWNbgIGP6AOPwB8OukzKJxj2bkVJvbxYfn+NdwhYwme68QwFr+fP5fh+dOjrurpqorzbfLbk/MCx3QXNHuENT8pYjezrmtHtAuk6tPxFkSZFl4WVds0nTmLWFxGN1UXJUVQKJDFMXY2+qT3XrH/IcOOSKRY8mAkgt+khrXBI9PQea9N/4z/PEHwtvcyLbYk2V1/aJGsoh4+9G8RbhIisQHFmYMnJb3uD4pp/KG8qO38DZa+5Yy/cqzY8oXuuNqcwhKSu+fVut1kkijLPDqS8g0jVpWSk3jVEQ2bpkRTTEAEwmEdYnSunzdfaTOzJEfKlXiAnlVW9ybkC5Pj6elWT/Jz/I/V/MePhda6xi5EHXsOcztJkBVlll4lFHBWYIiKW9WJJPoLVebrYNeQKNKVDLnBxMdctMb0SNqWQD4mzJhLL1Lz7gjJxoROzR9QyhRRfIMDz9bVcswnazOwss8jpBuVZFUzZ0YU1CHADApUBrL5WXJvNWQeVWXs+cqMd2a98leC0pw7aQ1JxPM1mhYwVc2x3YWFjrsfIWuVmZKLWK7Od6i5ci5WdDuRQiZQLpSrf6LhCj1qq4wjrBVaZabfjej0yotbq9qkOtM97UoNlEpPY2QetnUnHJmWbGVSIVbdLr2Ad+0VKqZr/AnzEapzAzRy7JyP4n5IuuQZVeuY1f5Wwjk2VlsEYHQfnVjMSYyRh8mx8FXyvm5u+m5NFuV5NPdjuDmSKRIqlXgpA6KxTK9Oio8KzIDtRuQ6TdRyCIAudBM51DkRMqAiUDGEQKIBuPaOlKSlX/8Axv/AAJT+JV0pUWvLa/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/AEus+lKtZ0pUcc48QeMHJeTrczn/AATjTL0tTk1E6rIXyssLA6rxVVvEKGiFXhDmYKmWETdafScBHsHSlZa3E/jS5ydE5odYNxm7ytAoxaMNfndUi3VljCwjMI+HUZSLhFRVu5imRQSbrF2VRIGxTBpSttZ+NmAbpVLnRb
Zh3Hdhp+RLi4yFeK7K1aKdxdovbvwfiLjLtlG4ldWZX2ehu9H/AEjZIvr9mlKcem0ypY7q0HSKJW4Sn0+tMEouv1muRrWIhIePQ3FNpHRzJNJs1RKYwmEClDcxhMO4iI6UqH8V+sqvP9x/F/8AXvlnSlTm0pRpSjSlGlKNKUaUo0pRpSjSlQ3s+XYMuf1cJngrkaxvqc9v6NjJW3pqGlEspEkSaLc2sA8AjYjrD1kZj9kMl62sI50YzxrisnvGMvy4nhYG1uXpy/KrKnV81+pP3FZsT+3pmrimL3V/cl2TmHEP6jEALF/S/iktm2yU2PpjqkWfLrTC81l1vLY3oFuLMM4iyIXCbinKTJelC9VRI8tEWB/Et0wHfrTAdfOxmx1xzjTTjHknBjRrgNyYeOF/Vh6iu/p2v20+3Xca7VNt8PVMmXkwcGeIwRuCwyOIJWF/0OfwJFVlYhib/H8z8CxOOOcWZ84YxcYPXlb60sVaNcMQ5BY0RFelu3UfkWNkXMBA5KlLU28dIpmTFwY6QkKfYwgNRwEyk7BjR4uxyMjD/b3cMvKJwn2EiQGyyFhdvr4r0V2rK0WR8RbvK3/TNPpuxjchMZopvYzsVskjIVXxXUSS4iQn24jfiAwJFwKuRO2bHcNnajZqq7ZCczN0q2QVcszKABVRaLqJiq2FUobH6BL1B2DuGr5ZSQSByHoa8liSRUaJWYRP+oAkBrenIA2Nvpe9vpVe+SOFEpXZ4mTOM2VMnYlWgZ6fyrLYEpk8nG47z3kt2+POuW98fyrk68chb3SKTF0cgigi2HchCiA71fL688Mv7zUTzQFWaQwo1kmk9fvJPgN6H6Wre/X/AJhxthhHrvyLrNdtVngiwo9lkRF8rW4ir7YOMqCzGAEyID9zP6k1FCG8zCXxtkbkRa+TZ7xWEMMUfFcRe+K9Nxw3sR8fXuySRmM3eKflAsmiheaMq4MkiousdBNEywdHWBRNqEj7c+LlZc+3MqDHjjD46RhuDsbF1kv96X8XNrX+tbPy/wDHfE7B1/Qaz46GFkvt83NfG3ORltEMrGiTlHjz4fAnGybXYKoYsFPK1wKc2NYYtk5GreYrZssctMhQ09PVq/Yj4/IKOuvEkXkVI9HJGyGJ4Ny8eyFYeKm8Yod10JIbAoUx9+3NVcKRk7VNPnSozK8cP/8ADD/ZYxrc8T6+fA9armRP2XHgyfgPXavq+BlwQy42dtDb/wCa+Kf3HNc2QKqzKPsAS7N5UgWq2/fcAHtDcAEAHsEAEAEAEPgEN9XevLXp4rnfSlQ75iRfKVljWoSHDD2G3vVcyjVJi0UhdvW4thfseKvjEtteJJzJUY+BOomr4hRyns5OBRBMesQ3gd+m5GGjdf4jJWZSy/aOSX+4XPhfxuPP+tbZ+Jsj41m7FlQ/LvvtpMjWzJDkAzO2NlBbwS8I7tILjiFP2C/3fbTsJRWXq3Fy7SMkWVsTXik1YVacdd3YWU/KOinfg6dm6ma8RAoqG8OXbqV7sAH06o66z5K69hZeHrMmLawGH/4z5D8cpciWT7y7foMUCEmNfVuIB/Cup9j8b77OxcrYwS6yVZyMhcdOWM+PEhCcE8Os07Ae417LyJHkXpz60zm4+BjGdklkp2dQbASSl0WpWST5wJzmFUjUnqIgBDAXYPT076v+gxdvhabHxN9krmbdEtLMqBBI1ybhB+nxYW/K/wBaoW+ytTm7jIytHjNh6l3vFCzlzGtgLFj5b8bn8bVvNS9RFGlKNKUaUpOWiqQVwi14qejmr5FRu7QbqOECLLMFHbczc7pkY4boOClNv1FEBHbb0aguw9c0/aNe+u3EEc0TI6qWUM0ZdeJdCfKsB9Rb09anNB2Lb9Z2CbHTzSQyq6MwViokCMGCOB4Zb/Qg+v41WhcMPSGL7U3Ssism5pqhgWCwxUSu/K9jk1SGdRzxNJZuVi5O19VUwnAhercN9eA+0fF2b8e9jRN808nVGN/3MMLSB4gRzicBl9tinhyWsOQIv9PeHWPk3D+QOvO+jWCPtKjj+2lmEZSUghJUJDGRQ3lQFubEEfi2itjdMHEkygpWUa10ziXJHxfiVDNjR8oIkVQdICYETg5QKQqoiUTGAPTuG+qFJvcnDnnxdRk5MejLzCKHmShjl8EOt+J5KFD3Fzbwb+avcekxsyCDK22PjybsJCZJuIDe5F5BVrchxbkUsbC/kWqR8Py3loOKjoZhQIFJjFR7aPZJElZIAIk0SIkmUROQ5thKTf0jsOt76v8AyX2Wo1sOrw9LhJiY8CRxgTS2CooUDyCbeP8A9taN2f8AjfrttsZtpmbnMbLyJmkcmGLyXYknwQL3P4f6VKqj53x5e3cVDxck4SsUmgc/sdxHvUzpLN2513aQuu6MzEqZUjCUe87S7fD2a9F9R+Y+kdxysbV67IkXd5CE+y0cgIZV5OOduBsASPu8i318V567Z8Q906hjZOz2GOjaTHcD31kQgqzcUPC4cXJAP2+Df6U8mtqVq+mYz1T7ReMfrQVT6FZH2xGv1mSrojVN+yad8KjUTKiVuoIrGTOBVRAg9Hp321qj5m6v2Dt/Sm0/WuLZ37mKRkLBRIiciUu32n7ircW+08f4VtL4d7NoOpdzXbdj5LgnGljVwpcxu9rOOP3DwGW6+Ry/C9RPjOJeRpJZmpNTEBEtli9662WWfuWJjmE5mxWrdJNuc+3ZuRTuyiPwhrzdgf41d62EsL7bKwsbHdeT/c0jxk+SoRQFJ/8AyW4g+lxXozP/AMj+k4EcqarFzMmdDxT7VjWQDwGLMSwH/wCUvI+fSpROJWs8b8ZxMdIvJGcQZu3jSMIVJshJSbt84dSJk+7A/cooNgPsc4dXQXYRAdeh5tj1/wCB/j/Gws6XIzIIpXSIWVZZXkZ5bWvxVVvYkX4i3i5rz9Dr998598yc3CigxJ5YkeU3ZookjVIr3tyLNa4XxyNwCKWtKyVVbvDtpNhJx7d37GbzctDKSDZZ9AtFgERGUEnQREqe3aYQKGrb1PvvXO3auPPwsiFMn9qs80BkVpMdG/8A4trAW+pNgKqnauidh6ntJMDMx5nxv3TQQziNlTIdf/4V7k3+gFzTT2rIXtKGvk86tsCyxyLB7SIJxELKTIydilWYqNZR09i01nsSo3A/QKQEHYuxvh1rXsXdxn6rc7nI2WHD0UwPgY7QEz+7kzR3SV5Ig0kJS/Epb0s3r67H6/0z9js9Pp8bW5c3dxMmdkLMBB7WNFJZ4USUrHMGtyD38m62ra4rSx/fqLRo1SRZ2uUxueOdAqVzIpuGE82KbupHu3ZWj1wicewh1SCQ222pL45j6X3Pp+owXni2Ww0Jia4aUNHkKPEtn4SMp+jOvEn86jvkJ+5dN7bt81IJddr94JEsVjKyY7HzFdOaKw+qqwYXqQg9o7/R1uzyfJrS/wBLVxtpSudhH0AI/tBvrkC9cXtXUssi3L1uV0G6XUUgquFk0EgOcQKUgqKmIQDGEdgDfcddUksUK853SNLgXZgoufAFybeT4FdscUszcIFZ3sTZQWNgLk2AJ8D1/
D60iapb2tykLCQlck49OqzB4ptJzLNNMJBymA+JXiu8IKySBBAA6wHY5TAO/wAAVPrfZ8btWbnKuDkQJrsowpLOgHuMB9zQ3HIKLfqBswIN6tXY+t5HV8LCZ86CZ9jjCZooHJ9tSftWWx4lj/4nypBFvrWGvVpqsws4THbpJSdmrB7cVNb3juRj0heLlGSSbATpVbIlQAe5TD1Sm+HWLP13a6DVZa9IkVtxl5vvn967yxjmwMoS3lV4/oUeAayYew6vfbXEbusbLqMXD9gDCRIpDwU+0WvcM3L9bHyRWputnVcuJqrRUwelT1djYqyO7rMQhHVZaRirlAjxug9XHu/Erpq92YA2EgDqM7X2GTImy+ua7KbU7jBx4cp86eAPipEWUSKrt45sDxP/AI3vUl1bQRY8WJ2HY4q7XUZuRNjJgwzlMp5VVijNGvniCOQ+jWpmMlZqmrVBliMTKNpFV/JzFLmGzlFmtNSqgtm5SOqux8Wc7xg5SUVEyndmACCA7B6dar778r7bseo/tnxq0c8k2RPgzq4QzzHgtnxI+ZMkbAuS3EixvYetbQ6L8Warr22/uPyOskMcMEObCys6wQjkxKZUnACORSEAXkDcEXPpUF7HVbBUZFSJscQ9h3yfaCLtAyZVU9xAqrZXYUnCBgDcDEEQ215B3mg3fWM463fYsuLmL/K6kBh9GU2syn6FSRXrvR9i0vZcEbLR5MWViH1ZGBIP4MPVT+IYDzWi22+n/g1C8walyORvXA/Q/Z/g19DwfWuVHm/413Nl/DOG7jukV/DrpL9w5TBVsv3ShVO5cJCId4gp07HLuHUURDXbA/sZCZHFH4OrcWF1biQbMPqptYj6i4rryIP3EDwcnTmhXkpsy3BF1P0YXuD9DY1IMc/ZHtN6rLpGYUr0eWVhGCMBDLHbw4NTvkEVk3SKgiD3vSHEBFTcALsAAABrdTfM3fOw9w1+RFlHBwRkwRrjQErBxMiqQ6n/AHLgkfd4AsAK0sPhro3XeoZ8EuMM3NOPPI2ROA03IRsylGH+3xIB+31NySb1ZJaaxBXCHe12xsCyUM8OkLlkdVZEFBbLFWR3UbqJqlEihAHsMHo17w7D17Udn1kuj3sIn1cpHOMlluVbkvlSCLEfQ14W6/v9t1nZRbvRzGDZxA8ZAFa3JeLeGBBuDbyK3KaREEUkEi9CKCaaKRdxECJJJlTTJuPaPQQgB26lY40hiWKMWiVQqj8gLAefwAFRckjyytLIbyOxZj6eWJJP4eSb1o7TY2lSgntgfNZJ81YdyKjaIZnfyCvfrEQJ3DVMQMp0mPubt7CgI6iOw73G61p5t1mRzy40PG6QoZJTyYL9qDybE3P5Ampfr+jyex7eLTYkkEWRNys0ziOMcVLHk59PAsL+psKzGU1FyDhdk0fN1ZBo3Zun0cCpPHsEn6QLNRett+8biqQewDAHaGsrF2uvzZ3xMaZGzYo0eSK49yNZBdOaeq3H41jZWr2GFAmXkQuuFK7pHJY+3IYzxfg3o1j+FbBQpjpqEKodIx0zkKqmIAokJyCUFExHcAOQR3D4Nw1nOrMjIrFWKkAj1BItcfmPUVgowR1dlDAMDY+hsb2P5H0NaqAi3ELEM4x1LyE84alUBSWlDEM/didU6gGcCmAEEUymAobf5IBqN0uun1Osi1+TlT5k0YIM01jI9yTdrWHi9h+QqR3Owh2uylz8fGhw4ZCLQxXEaWAFlv582ufzNfc7HRMtDybCdaovYhwydFkG6xBOU7QEFDOBDp+yEOVIDbGIIHD4BAdfW4wNbs9XPhbeNZdY8Te4rC4KcTy/MHjexX7h9K+dRnbHXbODM1EjRbJJV9tlNjz5DiPwILWuD4P1pCVdnV7ZB1KVpsu99w2EdJRDevkSULDzjIQPHmQlG0gmDtQrIxDATq+qHtHVP69i9d7Jp9ZsurZMv/DoceWFcYKRDPHYx8ZVkHMhCDa/r9fFW7f5O/67t9lr+z40X/Lpp4pmyCwM0D+JOUTRngOYIvb0+npTktWjVi2QZsm6LRo1STQbNm6ZUUG6KZekiSKRAKRNMoB2AHoDV8x8aDDgTFxUWLFjUKiKAqqoFgoA8AD6CqNkZE+XO+VlO0mTIxZnYkszE+SSfJJ+pNd+u6umuBABASiACUwCUwD2gICAgICHwgIDrggEEHyDXIJBuPUViR8dHxLRNhFsmseyRE4pNGaKbdumKhxUUEiSZSkKJzmExuztEdY2Fg4Wtxlw9fFHBiLeyRqFUXNzYCw8nyfzrJzc7N2OQ2XsJZJsprXd2LMbCwuTc+B4FZmsqsWjSlKbHf77av8AizP/AOmIaUpytKV8HIRQhiKFKchg2MUwAJRD6AgO4Dr5dFkUo4BQ+oNfSO8bc0JDj0I8V9AAFAClACgUAKAAGwAABsAAHoAADXKgKAq+AK4JLG58k18KqAikdUwHMVMomEpCic4gH/qlDtMOviWQRRmRrkAX8etfUcZlkEa2BJ+vp/rXz4hEO5A6hUzOP3kig9JzjsA9IF9PUAD2h8GuPejHAE2Zx9oPgn8rfjX17Mn3FQSqfqI8gebeTWJLS8RAR7iWnZaLhIpp3fipOZkGkXHNe9VIgl4h8+VQao96soUheo4dRzAAdohrtrqpUtkwIkUQEDdYAcTAO4D1AAh0iHYIbaUrI0pWsi5uGnCPVIWXi5gkbJPIaRPFv2kgRhLx5wTfxT0zRVYGskxUMBVkFOlVIR2MUB0pWet+9K//ACz/APujpSkZV/8AwDG/8CU/iVdKVFry2v7BvFL8TNS/kptKUwXkd/qneEH4oj/0us+lKtZ0pRpSjSlGlKNKVBmK/WVXn+4/i/8Ar3yzpSpzaUo0pRpSjSlGlKNKUaUph7LnGiJ30cPRtzgUsmHr6lt90vajUtoUqyDoGSs8zieoXR4gjwe7M426QN2Bro/dY37kYXuJ+7KcuF/u4+nK34fnUsNDuzpD2UYk/wDx4ZHsHJ4H2RMRyEXP058fPH1tWFMXd5EQ8i7lrS1r8cRouivNyryPj20YZ0QWyLs0hJGTat1UV1SimKhukVAKAgO+2uyV0SMs7BFt6kgAfnc+P4fnWFhQZGRlxxYsL5E5YERqrOXt5K8VBY3AN7C9rmmrxDTJnHuPIKpzmTbVmCRZGkHa2Q7oswcWKwpy0i6k2x3i0YQjNZFk3dlQbmJvugQvaPp1j4GPJi4qwSTPOwv9725G5JHp48A2H5VM9s3OL2DsE+1w9djaqGTiP22OGEURRQjWD3YFipZgfRifSt5ace0K+OK0veadW7YrT5xKx1Zeww7SWUrM+kmKBJyFF2mp4CUSQMJQVT2P09muybFxskocmNHKNyXkAeLf+Qv6H86wdbvd3pEyE0uXkYq5cJimEUjIJoibmOTiRyQnzxPiml41EvjGl2av3XBlM4/R9dyJbYug0+iTEfKwc3RQfi6iLsojGkTbQ8lZVXCqzhrsByKCIm2EdYOo/dJjvFkY8eKqSsEVCCCl/D+PALeSRVp+Q20k+3xs7UbrL3s+RgQPkz5MbJJHk8bSY4LkmRIg
Aqv6EeBS4c1vJqmYIq1tsiMG+IWtJkIeVxYatNlJKTuriQKuxtydsE4O2rZkxDuRZgHQcfWH07hkmLLOeswlAwRGQY+IuXv4bl+Q8W+tQ0ew64OqS6yTAdu1NmLImZ7xCJjhbNAYLcSWb7vcPkelQB5E2jlrxtzTFzeM7ehlDHnJjJVPqURV75WbHaSYctCrhBB+2hxqiSCNcxw4rbdZZV69MoYsgYoCAh6w1jaT7vUbBZMN/excyVVCurN7TX824/pQqCbtf7rCt49C1vxd8g9Rkw+x4ra3e9d1887zY00UP7+EAlTIJrmXKEpUCOMAe0D9fFP/AJS4+5EzblrI1dyetjWX4h3DEKNNf44axTpnkC83MXqcgV7abpGkj52OgIJ8ySFqi0e9KiZhASAbcRk8zWZewzZYsz2W0ckHAx2s7v63ZxYhQQLAGxH0qj9b71oOndXwM/rY2MXyribU5C5bOGxcfH4leMOO/KN5ZFZubPHcEAg2rM4m4ic1OINfsjxGIiZ/LANcQ2mdwxOTctVUcdY/fGbY4qR05aRdHbS0HAotyvO8IVwKxfXMbX3pcEwp+5y1g/unD2mMTErwQ/018n1UWv6G/rXT8n9pj2eUNH1+Xano3vnOhj2EcaTnKyl5Zc90RQUkkLGOxKcT9oFPFnXNdb4/Y+cZItcBd7JDNp2u188Xj2subbZTu7NJpRTJynCtFUVTxzRdYDuVurZFIBMID2AOdsdhFrMU5UyyOnJVsi8m+42Bt+H4n6VU+ldP2Het6vX9XPh4+W0MsofKmEEIWFC7AyMCOTAWRbXLeLiluteaa2tcPQ3VnhGl4sEC5s8LT3Ug3QsslX2ZyJvZdpEHODtZiyVUAiqhSiUhtwH0DrJOTjidcYuoyHXkFvZio9Tb8BUOml20msl3cWNM2lgmEMk4UmJJWF1RntYMwF1BNyPNKnXdUZajSlGlKwSycceSUhyPmppVFqR8tHFWILxJmofu03SiG/WVA5+wDCGwjrEXYYLZzaxZozsUjEjRchzCE2DlfXiT4B/GstsDOXBXZtDINc0hjEvE8C4Fygb05AeSPwrAnnk6zSjzQEQ3mFlpRm3kE3D0rErOLVMIPJAhzAbv1WxdhBMO02sLc5W5xYoW0uKmVK+QiyBpPb9uEn75AbHkUHov1rN0+Lp8qSZdxkvixpju0ZWP3Oco/RGRccQx9W+lb0dt+wdw+Afo6mT6+PSoejXFKNKUlLPMQzZSGrk1FvJZtc3i0IDdONNIRgB3IKqBNjuCbZioU23UYBAR3+gOq32DaavHkxdHtcaXJg2srY/ERe5F+m5/cfRYyPqbg1YtBrNnkJlbvV5EWNPq4hPyMvty/qsPY+ryA/QWI9ahXlzi88iQcWHHfi5Zmq5Ezisd33r9iVc5ClNFmJ2vGyZzCJiG6TJl9AiAa8ofJv8Ajzl64Puui+5k4zSXbEteRAxFvZI/WoJ8g2ZR6XAr1T8a/wCQGLsiml7v7ePkrHZcq9o5CoJPvA/oYgeGFwx9bE1GOCp8/Nz7qqtY0gTKKb0XTd8qDZWPLFgLiRW6VDEEVWrdE4mJsYxi77AI7a8+6fq+6225k65jQD+6qJOaSHgYxFdpD9xHlFVrr5JF7Am1b623ZdPqdNF2LInP9qYx8GjHMSGX7Y18A+HYgBvABtcgXp2OPEvR6zkFxMWmWOwRYMXBYKUUVM1ZHcrnBkqR8z6FVzEcNXPUTcQKn0CYw9mtk/B+06j1/uj7TsWS0MUMLft5ieKFmPtkSJYsQytdbkBeJZjWuvmvWds3/TE1nXsYTSzSr+4iADuFUcwY3uFBV1s1rluQVR+M/jZTqKtfTs8QtIWGJWn0a4VaDjXL1QsiqqCQmFE5UjiySEQE6wbk6RAQ317TPyL1mXSr2DWvPnaxs1cUNjxO5EpPG5U2PtjwS/lQCCL142Hx72SPctoNksOFslwzk2nlVAYwL2DC45nyAh83FjanG9H/AKf8f0/Tq82+lUi4rR2BlOPmSKNfmEYN6V+yXWeLsk35VGCKoHeMwQUEClO6S9UD+kvwaiN3i7bMw1i0uUuJliZGLtGJAYwbunE+hdfHL6VLabK1OJltLucZ8vFMLqEWQxkSEWR+Q9Qh8lfrTQZ++LJ/W2cBkSwe7qr114yBftmS0hItV2vSVy4QbIJqCZBRI/dKdWxB6g+EA1rH5pHQM3QRabvGb+xklk548io0kqMlg7KiqbqQeDXsDcfUCtl/Df8Az7C3su46ThfvUij4ZEbOscTq9yqszEWYEclt58H6E1X3cEK9WnMi3x3e1JuuTiabV41K3k46SBsiIqlbyhXDNs3dNu+DcBTP2ibYS7BuPijtEOl0E88HRdwcvRZqhHTjLFLxX7uE3JFV1v5BVvJNithevZ/WJdzv4IJe76gYm8xCWR+UUsXI+OUXF2ZG4+CGXxa4a5sEozsEuEMeojJqo1qQl2Uk8YiCZmxHqRioFkS9ZRMmqkiPrbCAGAodXoDVax9ztF1R6yclk0E+THK8ZsVDqQvu+RcEL62IBAF72qxZOl1p2g7KMdW30OM8SSC/IowLe34PkEjxcEi5t6mpztK5XeP93p8zEIvV6ncYlnV3045m2RUF5Z6uk7RknLdYoGBqRsHfACQgTYQ29Hb7BxdDo/hbt2r2usSV+tbTGTEkyGnQK00jK6yurAHgF+/7Pt8j/XyPkbzd/MvU9nq9k8Sdj1mS+VHAsDllhRSjRIym3Mt9n3fd4N/ymIBTAYPV37dwDbsEPT/hAdeobEEfWvMdwQfNqj1b5q7YqCMGNTsd7ipWzSE7aJd5HGlVK3W+tuBoONbszkORQAUHwxjbh6o7h8OtJdn23bfjkY/7Bc/c67J2EuRlzPEZji4t1/8AjxKhBB8n2ifHggit1da1XVfkL3xnHB1GwxsCODFhST2hlZVmtPKz3BHge6B58i1M/wAonM/MJ0yYqcjYXUI+Zrov4uHUcnSYSBel21NJNo1Q6zaUVbOTEORTboBLb0hrV3+Q0+52keq2nWsjOk1UsTLJDCWIjl8OhlWIlllKuVKta3Cw81sz/H+DTax9prOxwYMe1ilVo5pgoMkf6HETygBogyBgy+vK58VusBTsllutTlHyLHKTsLWDw5mj5fv2y4OGi6wosJNwksk4WdtRRKKe4APQQwHER1LfC+5z/knQZnUO9QHM1WuMHCRuSsGRm4xysrBy6FQV8egIYk1F/MeowPjje4nbekTjD2meJuca2ZeLqvKSJSpVUfkeVvqwKgCpXLSztKwsIIkJIKsXEW4ennyCkMayWbqAklHLAJu+F04L6xdg22/6PSEmyyY93DqFxJmw5MZnOSOPtIyHiIm88uTDyLC1v/TzrHrsWTSzbdsuFctMhUGOeXuurC5lXxx4KfBprbzmOFiI6cb1pVSXlouWbViXdNkz+DqUlMouko2WkVFW6iTxk2eI7KFS6+0Okfoa15275S1eswcyDQM2VssbJTEmdAeGHLOriKaUlCHRHH3BORuLHz4rYHUvjHabLOxJt6q42uyMZ8qFGI55kUJ
QywxAMGR2RrqX4+DcePNQXaJzNyynFVu+XN3NtLG6Fg6l2Mm7Qjfs6DgjdRs2cIs0TJNXyYACHdFKYQ6Q7BDXj7Gj2vavkXH0PctrJl42fJ7bTRyukX3KwUojLGpCSC3t8AD6fWvXGTJq+sfH2Rven6uPEycGP3FhkiRpfDKWDspka7xm/ucyV9fpWwlMDztQZmnpq0xNcbpWZxCxMios4XF8ikRRRpJtFYIr9VuqsRE/UifoMQQ2EdZ+x+G9x1jE/vO22ONgQrsGx4ZSzN7iqCUlQ4/uMpYBro3EraxNYWu+X9R2bKGn1Wvyc6VsBZ5owqr7bEgPE4yPbDKpZbOvIMDcClnGQsxk+tztoZKurLZSQblaSVReMlpCIsUG/SLCrQ0MsgVdgnZY1AAN3YFL0lMPaYdWrX6rafIOhzOw4rSbDsAw2aUh0aWHJx5FEDQQMvKMZUS2PGwsGP6jVX2G01fQd7h9fy1jwNCctBEGR1jmxsiMmdZ51bjIcWVvBYk3KjwBTMV2rx8nB2ESTMM8tC0Ms4a15yV01kWK0dJJu5ZwLx6g3jPEki2quyZFTHOJ+kAE3ZrVOi67gbDUZzLl4svYnxGZMVg6SxtFKHmbnIqxBxCrWUOS3LiAW8VtLedgzcDb4IbFyouvplBXyVKPFIssRSFeCM0vEyst2ZABx5E2NNeIgb1g9A9odm3p+l2ba18p5Nyv6itgoOP2/Uf61xr7r7NbmvomczUWimD0VTPm5kSxhkSSJ1kz94iViZxs3K670gdAn2DfUjpYXm22NEvumQzLx9oqJSwPJfbLfaHuBx5eL1EbqX2NVkSye0I/aYEy3MQUize4F+7hYnlbzarTrtP3NtVKtMVd4xiLK5bIuPdGzEbneWBc0emorEiqJyHB+y3MqoCXrKGDpAe0AH9Eu27rtWP1zXbPr0sONv5I1b9llBS+SxiBMPIkf1I/LsE8sRYfn+e3U9N1bI7FsNZ2CObJ0SOy/vMYuExl9wgTWsQY38IpfwoNyD5pN4ahchryS1ztVtlJKClYNIsVBPxcN1G8i4X72SUdxiyKfgwYrpmTamATGOiICO3ZqC+LNT3ebPftXY9lkZGmycNRDjyclKyM3KUvCwHDgwKxEEloz5t4qc+T9r0qHBTq/XtbjwbfGyyZp4+LBo1W0QSVSefNSGlHgK4Nr+akR2gO4dg/R1vAflWkz6eaSkC+g5KWsziOh12UqxfpREzIuosWKsodsiB0DIuzB1SLRFNTYp99g9Aarmmy9RsNnsJsHFeHYwzLDPI0PtmYoAVKv/7qKDYEeB6VY9xibbB1uBDnZSTa+WEzQxpKHEIdiG5IDaN2Iuy+p9aVYbbhv6N+3b07fDtqyC1xyvxvVdN/NvWknT17Q4jnhrYvXnD8sxIJMz1tUyzIsSQ5AYpujnERCSIHUC4egB21WurzdhnwZX7I+E+YMqUIcUlkEIIEYcn/AN0efcH0NqsXZodBDmxL11M1MM4sRcZICuZiD7hUD/2j44H6i9NVnv2pPQRKfWbhDwMsuoi+no17INY94vVzKlTWkPErHTVbtWK5QOfuzAZUNyhv6B1z8zf3DcaZesdf2mLhbJysmTFJIsbtiE2aUOxBVI2F24kFx9v5HYnw7/b9Rtz2bfazKzNagaPHljjaRFywLiLiAQzyKbLyBCH7j+Ij3RuQ1rhpItE8G3vkeeWLCRE1EsywsgVsqPs5FVi0aoC0U3OUFkzKh1j2iJh37NJdQ+b+x6rY/wDDvaTc4LZIghnhT2JQh/pqyIi+2fP3qW8nySx+m6O2/CvXtngf8u919PmrjGeaCZzPEWH9Rg8jtzHg8GCeB4AUW8zkq0I7rsG0iHs7J2Vy2M4MpMzAkGQdd+uosUq4p+rsgU4EL/7JQ16969qMjR6mLWZeZkZ+RGWvPNb3H5MWHK3/AIg8R+QFeSt/tcfd7WTZYuJBgwOFAhhv7a8VC3W/n7rcj+ZNKDU1UNQHaIAHpH0Bpa/pStMEwg+j5R1XlmU26j/HNiNkHZO5PLMyD/2a5XJ1A2U74SlPv2l331Ff3SHMwcjI0jRZeRD7iBVccTMg/wBpmF+JvYG/6b3qUOsmw83Hx92suJjze2xZkNxC5/3VU25Djci36rWrvh15J1FR7mYYpRkqu1SUkI9FwDtJk6MH2Vum5AABYqY9gGD0679ZNnZGugn2cK4+xeMGSJW5hHPqof8AmA/GunZw4OPsZoNZMcjXpIRHIV4F0HoxX+UkfStlrOrBpTY7/fbV/wAWZ/8A0xvpSnK0pVcPmP8AKTkLwroFJ5OY8xtUcr8cMXzz2S5k1k71eOy5CYgdNkmhMh4kVcOmsBKvKM/UF3IxjgQXfNfVQEDAOlKQ3ly86cueYlP5T5EU3H1cpfl8HKlTuMtnsRniOcc0WmvybtC7ZLloAjhRhTMeCYAZMI92QsmoqgZU4FKIl0pVq+lK6zJJHOmodMhzpCIpGMUBMmIhsIkEe0o7a+Gijdg7qC6m4J9R/D8K+1kdFKISFb1A+v8AGqo/PEXkG3lb8pXMTHpy0q2jMeOIqKWceERkpJHKdLUYsVXQkUBqm7dFKmZTpHoA2+w7a+6+KZy+cxOePFO+SdBz+947Xprk7hLye5B4Ve4zp1xrp8Y5P41UBlcntEuJZ20yvxgU+SYS7fokUgi3R1UDl7ohTgIKU/V15tZhgMYeUZb2DSlmlucGZMFUPMhHEQ+UaM4TIvHe+5TsClKRLLEUh3ydlrTYrc65nZU2wnIYpjCBwUqtDjfe+f8Ai/GHmkZm43q8d2uLsA+YPziyS+omS4G2TFwzmlWpaMt2QIJta4ixRMdi8GEQwVaxbgWMoZy7N1KlKmABpSvR3gvLcLn3BeJc41tq5YwGYMX0vJkOxe7A7ZR13rMdY2jNzsAAK7ZGRAhhDsES76UpTVf/AMAxv/AlP4lXSlRa8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/AK98s6Uqc2lKNKUaUo0pRpSjSlIy9WhlVYVR9ISEdEtVBFJaTlXrWPYM0zdJBUXdvFUGyQmMoBS9ZwATGAPTtr5ZlQcnIC/iTYf9a7YMfIypRBixvLO3oqKWY/wVQSf9B6VUvAYw5RXLnE9znPReGsbYWoMS/wAf1lwwr8bbMvZuo0rHeNbu3V6SEzunwDCwqEckYl6AP0iAgcDdWqxHhbmfsZ2Mq48WviXgtlDSzIR9X9VUNYgf9/Wt55vY/jjUfDKdMwpdvsO350q5UwaV4MHXZKPxKjGP2zytFdTIbkXFrEWpj/NA7i549zhiia5BVyPrEtgOJl3fHSEx8pkHLS79tkqDWbZVjoKHlGNjlq43I28IdBLpTTMUyoiIFENR3cLZOLkYUmUghbFBMATnJ/uLaUKCGKj0t9PWrn/jhz1G903aMPRZD7GLePGu1kyhi4IU4koOE8jo0SSm/MM3kghQPIqafF3OlNyhFWTGtWgcgRD7j2yoWOrE/udMe1GMnnpaeyO2f1Hxq655OM8Oz2UHfd
BQQTNuPaNh0+xgzI3xIVlVsUJGxdOIY8BYrc+R4/0Nag+SOl7breTj9h2U+DLDvnycqJcfIWd4199gVn4gcHu3j/yA5Cwp2KJXcoQ1rylJ3rIUbcKnY7MzkcW1tlWkYR1jyspR5EHcDJSiayh7I4dSO6wODAQSh2fDsGbixZceRM+TKskLuDGoW3BbeVJ/m8/Wqvus/reZq9bj6XAkxNpj4zJmStMZFypi11kRCB7ICfbxF7+tOhtvrMqt02VIv87bbdlGtSmM7hSY7HthYQkFbbF4H2Hk1m8jgfLT9O8Kczj2YwWHw6vfABu89HwgGHj5Uk880LwyRpE4AZrWkBF+S282Hob1Y9xo8HV6rW7HG2OJmZGdA0kkEXL3MNlfiIp+QtzYfcvHxb/rXGScTwGUnWP3c5MXGIUxxeI6/QxKlZ39bRlJaMQXboR1nTZerO15UrgRVZq7JnMACP02XgxZhiMrSL7UgccWK3I9A1v1L+Vc9f7RndbizosKLElXYYb40nvwrKURyCXhLf7cot4kXyKdIROU4KiAlETdZB26QEQHcBL2bCAD9DWZf6/WqyOJHH1HpTSYnwdi3CCFzaYtqqNWQyHeJnI9vSQeyD32zdbCKYy8ycZBy5M3O7FIv2JISJF29UoawsLXYeuEi4ScBLIZG8k3dvU+fxtVp7R3PsvcnxJey5JyXwMKPEgJVF9vHiv7cf2gXC39Wu34mncIdRE/UQxkzkEQ9IgP0wHbYdh+HWb5Bv8AWqsVDizWIqBdGxX8WnK4xaLi6Jv9PssbfrnkPkFbslx9qyXiS72l336WJ6zXHSh52Co1gSZpLlblAiCQnN6dh1W8bC/abv8A+NCskDh2knaQNJE7G/tqPUIwsbDwK3duezf8h+MAd1spcHa48mNj4urgxGhxM7HhWxzZZVtFJkxFmXmbsbD0qd+rJWk6NKUaUrBLGRxZFWXKxaFlVmqbJaRKiQHqrNI/eJtVHG3eGQIp2gXfYB1iLgYKZzbNYYxsXjEbS8RzKA3CFvUqD5A/Gsts/ObBXWtNIdekhkWLkfbDkWLhfTkR4J9bVnay6xKNKUaUo0pXICIb7CPb6dh9P7f0dc3NrfQ1xYH1rjcfSHYOuK5pncgYXgLs9bT0e9c062tVjqhZ4JJNN86KZEUhRfl3TByUQEA6t+oC7h2761d3X4q03bcyPc4U0mr7PGxIy8dVEjgjjxk9OX8SbgXHm9bO6b8pbjqmJJp82KPZ9bkUD9rkEmNLNflH68fr4ta9j4tUOaRXC4uyc5hsnRLMtYsHtqto2qfi1Umi4CiqQkhEOF+tJqV2JygZQwGApR33D068t9R0Q+O/kGTV/IGNEOv5vv4q5mREQjDibSwM1woe4BYg2B+nrXpzte8PyD0GPZ9CyZT2DC9jKOJjygup5C8cyrYuUsSFBFyD61Pym1CKpNVZVerrLlj2qDkY907WI7XFR+ZRwRyssmmiRyAKrAYOwNyAAa9odV6xrup9dh6915nXCjR/ad2DteQlgzMAoYXa/p5FhXjjtHZdh2vsMu/36ocyRk9xEBRbRgKVUEsV8Lb18G5qFNdynyDtcpba1T3JLS4jBFqWYPGRcV7LMyfiiq8TBwJWffSHdGTBJU5x6NxANw315P0fyL82dl2Oz0XVpBsZ8f7PfMUMXsmOSxcBvs5SWKhHLHj5AuK9U7v4++GOva/W73syHXwz/f7Allm90PHcIeN34x3DF0AHLwTY1OasFnSV2DLaFG6tjLGNAnFGoAVseS7oBdCkBAAoE6//AFezf0dmvXvX13C6LEXsLI29GOn7gp+ky2+/jbxa/wCHi/p4ryVvm1DbvLbr4ddGch/YD/qEV/sBv5vb8fP4+abPNGIY3KMJ35hdIWSDj5Aa65RVEqCi6wJrCyeIGIoCyDhRECgJek5TDuAj6NUD5W+Mdd8hagzOZE32JDJ+2ZTZSzWb23WxDKxW3ixBNwT6G9/FvyXsOgbb2l9t9FlzR/uVZbsFF15owI4soa/m4IFjY+aqhVSWQVVRXIdJZBVRFdI4CB0lkjimqkcB7QOmcogIfAIa/OF0khkaCVSsyMVYHwVYEgg/mCCD+Yr9GIpYpkWaIhonUMrDyGUi4IP4EG9fADt/h18lQa7CL0uZXINpn6xD1Cak1HkJXCkNBtvDtu+aqpEMkmKrzo8UqmVE4l+qEewPoatey7x2Hc9fxesbfIMupwQDjrxS6sAVBL/rICkj1J8Cqfruk6DTb/J7JqcdY9rmk++3J7OpIJsl+AJYA+gHk1gt71dWUc4h2ttsSMW7ETOWRJh8CC4m6QETgKvVuIJl9Ah6A1i4/b+2Y+BJrMfZ5ya+Q/dGJn4te3r91/oPr9KzJun9Uys1Nnka3BfPj/TIYU5La9reLfU/T60qi5syUZOIRd2R3IIwcm1lY9N4dTcrhol3KSKyzZRuu4ZimGwpHOJRHt9O+rKvyv3wx40OVnyTxYmQk0YcnwyDiFZlKsyW9UYkfX1qvN8UdEEmTLjYMcEuXjvDIUA8q5uWUMGVXv6OAD9PSujHeUbHj+wtJVg+eezFZksnYIpsudJObTP1JrpvBEw+IOVM5jJdYiBVNhHft1jdH+Q970nex7DEml/YNliXIhViBkA+GD+btYElORNm8/jXb3f490XctLLr8uGL9+uKYseZlBMBFipTx9oJADcfVfAqQ0VmTFRM0J3lmW0sWs4wSi3wOEmMXBxjlRMGysm+ZNjOHEkc5Q6jHHpEhjibt2HfeGt+VPjmP5WHb8X+4xQZkIik5COHHicjiZZEUs0pI8lvHEsW82rSew+L/kR/iw9Syf7fLPiTGaPiZJciVQeQijdgqxAHwF88gAv1FTPkpN/K1N3KUN9Cv5B1Hi4rsg8WMtBOFQUAAXXVbjudqBCH3Ev+UGvVOfsMzZdbl2PTZsSbNlhLY0jnljsb/qYr5KWDenm9eW8DAw9d2OLX9why4cKObjkxovHIUEfpUN4DXK+v0NMgTNNAq675m8g2SzZSALZpSzVdCPeQVjnUDIIzbViTYh3DxpJKmKAqHEeoNvqh1qZPlbpfXZpsXJxImx2wxlzZeIsb4+TkLxXISMWBZ0lJUcmvfx61td/izuPYIYcnGy5VnXNOLFi5TSJPjY7BjA8h8hY3iAJCra3n0pyGFBpM7No5JCIMrJzkDF+EQkkUitmCREyO2LlGOFH/AEGTSE4dZymEQHf4e3V7wumdS3G2Tvv7UtsMvDh4LKo4xgAPGwjt/TlBI5MDe9/r5qj5nce1ajVN0Y5IXAxMyXm0RPKQklJEMl/6kRseKkelvpUcqnhu9FgpOHl3VlVjHlnmYKRqikg3ZwiENJLGVG5QblyCy67psIlOQOgpjmEQ37R1orrXxb3BdRkavaSZ7a+XYT48uGZVSBYJWJ/fY7vyZmXwVFgWJPm163f2P5O6k22x9nrY8Bc+LAgyIssRM87TxKAMKdV4qqN5DG5CgDx4FNnkywQlJsadWgizqL6jAyiZl41lW8Ce0u2zcF4iwOjRrYVVncQRyVIElREpiAH0RHVA7
9vNT1PeL13UDMXL1Htwzusy45y3VeUOQ3tJyaSEME4N4IAufJq+9D0217Vo27BtziPibb3JoEeFsgYiM3GbGQStZUmKlyyC4J/hWsqmN4TJ0m8g5a3SsNlJ04TfNW0tEEWYyMSZoR+Z6dVh0j4pRmsU4mOconNsIAIG1H9d6HqfkHPl0+02eTi/IbyCRVmhBjlhKCQyEx/zGNgxYkFjYi4PjP7D3nadBwIttrdbj5Xx/GhjdoZiskU3Mx8AJL/aHUrYAgD1IIppcixUHA3Gcga+lIosIZ0aMOEo5QdulnjMTIu3RFUEkSlbOFQ6kyCXqKUdjCI61r3bW6jTdry9PpFnXCxZDEfedXcuhs7AqAODEXUEclBsSTWyuj7Db7jrGJt900DZeVGJR7SMihHsyKQxP3qPDEGzHyAKRGqtVsuL2+tZbFk8kXaLKPbLvHi5hBBu2IZRY5iFFQRIUu47JkIJhH0FAN/g13Y+JlZ2QmLhRvLlOftVRdjYE+LfgBcn6AE1j5mVjYOM2VmSJFip+pmNlAPixv8AiTa31JtVmeAFJGz1YJC4Jv5mTg5ZH2NLTajGTbJGTYg1FzWHrcpjEbARPpOcTCYVN9h2178+GJM3sHXhm9oWXL2OHkj2ZpzHKoIjC88R1uQnixN7lr2NeCPmOPD0HYP2fWWhxdfl4x96GASRMbyF+OUjGxfzdRawW1xenusTOfftmZK/MowrpKTZOHjhwyB+V1GoqdT1gVMxyAkq6T9Uqnb0623vMXc5kES6XKTEyFyI2dmjEnOIG8kYBI4lx4DfStT6XJ0+JPK+5xXyoGx5FRVk9spKR/TkJAPIIfJX60l7tN3hrL1uCpMAm6PKOvFS1hkyHPAxMU0OPjGTgUhKsSUepiHhx36dw2EB37K923bdvg2eBp+p4QkfIk5zZMovjwwof6kbFSGEsgt7R9L3uKsHVNV1LI1udt+1ZhRMePhDjRG2RNM4+x15faYkP+4LXt5vXfkayXKsxjV/TacN1U79RN8yK8Fs5aNxIUG6yCJAMo6E65tjlIAiUoCbXb3nfdp6/gR5nVtWdtJzYSRiTiyLb7WVRcvdjZgP0i5/h19I0XWN9nyYfZ9n/a4+AMblAyu1/uVmNgllH2k+psKRVWnbzBTZZHJabki1+lo2Nqlcr5FpaLq6aTQVl/a6/QTwKwnMBVVRMYpxAR7PgqnXdz27T7f9/wB+Eiy7nJiiw8XGDTQ4gVOTe+1h7bXIDPcqTc1aewajqW41P7LohjaLT40suXk5BWGXLLPxX2VuRItrlUsGXx5Nb+q1iu48kJ2uNrWZF1fZSVnYKvOlkUl49RRFT2ieCRE5lXRUjbKnOPaAkDf0amOu9f0fSM7M0UGxKT7nJmycfGcqGiJU+6ccXu4B+9mP1AvUN2Hf7ruuFibufXB4NPjxY+RkoGKyAMPb/cNayEj7VUfQm1NO1xK6m3F4jsh11xe5aDigZ0m4SD90xXsca48S9ax790m67gJBk9UKUTATpIQOkRENt9cY/wAa5G1n2+D3fCfcbHExuGBmyyPG2TE3KRYpHD8RJHIQCQtlX7TcVsfI+R8fVw6nO6VmpqNfl5HPOwo40kXGlXijyRoU5GOSMEgXuzeRY0vsNYkRx5HuX8kseRsU6hGKPhft2Si0MZmgYns1o9bAbv0Ejm+rDbcS6unxZ8aRdIw5M3Pcz7vMSIye4kZaAotvaSRf1KpP6vHpVN+UfkeXuuamFgoINLiPKI+DSBZw7A+68bfpYgfp8+tOOxjrQjapySfT7d3VXjFgjCV4jEqTmJeoAQH7tZ+AdTojwSmECj9Tvq9YeD2CLseXnZmakvXZYo1x8YRgNC6ge47Sfz8zcgfy3qjZeboJevYmDh4bx9gilkM+SZCVmjN/bRY/ROA9T/NalRqw1AVhSTRZ/HP2Ld85jF3jRw2RkWYgDtgqskZMjtt1er37cxuou/ZuGsTPxpMzBmw4ZpMeWWJlEqfrjLAgOt/HJfUfmKy8DIiw86HLmhjyIopFZon/AESBSCUe38rDwfyNamqVprU4RpDt1TO1kwFWQlFUUUHczIqB/pMpIAiAEUeuzAAnN2iO2o3rmhx+uaiLWQt7sqi8kxUK88p/XNJx8GRz+o+pqR7FvZ+xbaTZzKIomNo4gxZIIx+iKPl5EaeeI+l6Uep2oOjSlKbHf77av+LM/wD6Y30pTlaUqEXMngxQOcbnC1fzRd70fCOMrye/Xnj9CPm0fj3kFJR7ch6hEZZEqQSsvVqpNIlfBGpqg1eqB0rkMAAIcXANj60sf9K13Gny/wDFfETOmc8p4DslsoeMc9oxs1ZOL0Wdgng2sZTbuVTTmVaDAlbgvUJq1xwIoPmLIUmChku96BOYALzSp3aUo0pUaeYHGuI5e8dsg8eJ21ydIi8ge7YOrRDRzSWkoz3btMNaUvDR79VBov4taGKibrMHSRQTB2gGlKTmdOEtG5C5fw5lO6WeZLF4uw1yIwpI0dkzZlYXWscj6dAUu0Kvpcynjol3FRcKYW/cEMBzrj1bdIaUqGFF8oi0wVi4cO7/AM5M0ZWpXBDKMHduPuPJujY9godhVa7SbJQ4qm3SQr7dtJ3aWj4GbQQbzrowOkUWgkBERcKn0pWHJeT3aCt+TNQpvO3O1Bw9zGzllHL/ACGxfBU/H7hGci8qPmh5+jUG1ybV1P40QewSB41/IsTKrv0FesU0lQ6hUq4WlUms41olVx3S4tCEp9EqsNT6tDNQ6W8XXq3FNoeHYJfCJGjBmmQB9I7bj26UrHq//gGN/wCBKfxKulKi15bX9g3il+JmpfyU2lKYLyO/1TvCD8UR/wCl1n0pVrOlKNKUaUo0pRpSoMxX6yq8/wBx/F/9e+WdKVObSlGlKNKUaUo0pRpSoXc8uNkPy149W7Ak7apamRuQCxbJxPwjNtISbD2POxdgSUas3iqTVcVlowqRgOYPUOIh26it3qo93q5dXK7RpKBdgLkWYN9fH0q//F3yBlfF3esHvWFjRZeTgmQrFIxVH9yN4zdlBIsHuLD1FMDZqRE8isKVOh4I5QTtKQxhcqZCS+SsPScbJyr5fFLZOJsGO55RF4m3akmughJJDrE6YgAdI6x5sdNrr0xtZmNGsMiAyREEn2/DI3oPP8w+lS+t3GT0LuGVu+69bhzH2WJPJHiZyOiKM0l4sqIFbkx3Jia1j580wHLzkbQsT4x5bco8Z4farcieNkTGYfTyBeKQ8g1V21vmItogjVrQcpV7RT4dWfF30omKio5TAhtim3GM3u2xsHDztxiQD+6YiiLm6EX5Efpb1ZRe/jxfxV5+Kvj/AHfaOydW+NuxbVh0LsMr5xxsfIWQAwI7EzQjxDO4i4XYFgpuLkWrz+cJ/N65htOQWEaTmPMSF5xJYLtHVK2trtGVdiZnF2l+LM8+7uSMS3mkTV9w6K4IKrgUu6T7s3qbdOsOvd63y7PHx8+cSYLSBW5hRYMbci1r/be/k2+le6PmH/FT4nl6Judz1PUnC7TBhvPAcd5mDPCvIRDHLmM+6F4my8uR5Dz6+z9N
VBwkk4aOEXbVwmCzV22WScNnTdQN03DddA50VkFSbCU5BEpg7QEdb/uCLqQVPoR58fx+tfkSyyIxjlUpIpsVIIII9QQQCCD6g+RTY2s2ZAyNjQtKLQTYpEbD8bx7AaTC6J7MC+63uOVqAxxhNI7+L8QP739T26w5v3/7uL2BF+z+73eV+fp9vD6evrf6VYtWOpHQbE7g53/Jx7X7H2+H7c/d/W/ccvu/T+jh9fWjMGacZYFqKN8y3aEKjU1rDAVVKWXZSUgQ0/aH5YyCjwbxbR66A796YCAfo7snpMIB26Z2wxNbj/uc5wkBdVvYn7mNgLAE+TX11XqHY+7bU6Xq+McvaLBLMUDKp9uFecjXdlH2r5te59AL1D+h2zOWbL9k3IeBuVmO7ri2j3+9UwMUSmJnkWWJtcJVCMGVInLmug0lnDeJt6qbxy9bEOCrcwkIYfqQgsabZbHJmytZmxSYUcrr7ZiIswWwUsbE2axJH0rau71XS+n6LXaHu/WNhh9lzcHGyP3qZqvzgkn5NkR44LIC8AMaxuRxYAkD1rAcrveDuOb7ybyZB5Iy3l3Ot0xUjl6hYyl5a3Uyu3GUcnrKrnElflG4Oq5SmIPTLuUx6jKCUu4gGwh8Ev13El2+Wss+bkvGJUjJdVcnjeJT+lB6n8fFd8ccHzLv8H4667Nr9V1XS4eacHJzESCeXHQe6BnSobS5DceKHxa59fN3ZxbyBrufFsq4avExQaXeV53IlOr9OoWWYmx3qXx5EJFi3N2AkSoWRq063F2cF0Nu9YLEAR2HWdh7OHZmfAyWjjySzqqpIGcoPHPx5VvPkfymqv2Xo2f0hdZ23TQ52XplhxZ5Z8nCeLGjynPMY93HCaM8Rxa/GVSbU6fjMZcRsAIubbcJxlizC1QbJS1zub+VtthThGCqbYsjOyKLd1LTL46zkoGOVMxx3AADYNZnLE0msvPIwwsdPLOSzcR9SfUnzVZ9nsXyn3ox6rFhfsu4yyUx8dUgiMjXPGNSVSNbA2FwPxNzRiPFOFYCwXvPWK4VNCa5KEq16ulqTfza5bomlDkGsyhY2UdKN4cvsl2UQSQRb79Xrl6tMHCwIpZdlhKBJl8XZrt9/j7TY+ng/S1c9p7P2/PwcLpPZpi2H173sfHhKxg45Mh95OaAF/vX1Zm9PBtT56kqpdGlKNKUaUo0pRpSjSlGlKNKUaUo0pWhslYgbfFLwljjUJSMcCmKjdbqKYBTVTWKZFdMxF25utMNxIYoiHYPZ2aht917Tdn1r6nfY6ZGA9rq3g+CCCrCzKbgXKkXHg+KmNFv9x1rYptdHO+PnpezLYjyCDdTdWFifDA2PkeaS1fjMjRlvlSSctBSePVkVDwLZJuZtOQYpCRJhEgJEikdNEWxR61TmMcxgDbbt1XdJgd61/ZslNhk4eR0llJx1ClJ8e1hHD4FnRUH3OxLEgfnVg3Of0fP61jvgY2XB3RGAyGLBoMi9zJN5N0dmP2ooCgXvelwLJJq2fFimrFk6cIrmIZFsi3Io9FI4N1XHcpF73pWENxMAjtvq3HEjxoJRrY4Ysh1YgqqqC9jxL8R5+71Juaqf7uTInibYySy46MoIZmYhLjkF5Hx9t7AW82qCEtlbk7U2Mmwm68ZQW7lw0NZBrAuASUOYSJKR7lkUsc4RKIdSRjInAwfVb68dbP5H/yD61iZGHtsEsEdk/dftOViTYGNktGyj1QlCD/NevXut+O/gPseVBl6rOC841f9t+64kgeSJFcmRWPo4Dgj+W1IrEucp2LyYE3fbM+cws0i7ZzijxRydmwEEjqtHSEYyRMQh27lIEykTTKUoKj9DVR+NPl/ca7v3927nsJpNVlI8eQXLlI/tJR1iQEAqyhVVVAAY3HirX8kfEepz+hf2rp2BDHs8V0eAIFDyAkB0aV2BIZTyLMxJKAfWmYv86ys13tdgjkAbMJecfvWaAFIXpbqLCCRukhEyh3pS9f1ID63b261b3Pb4vYO37Hd4Mft4OVmSSRr4FlJ8GwsPP6vQevnzW0em6jL0PU9dpc1/czcbEjjdrk3YDyLkk+P0+tvHjxSQ7B1W7/T61ZiWWuDAPSfY4JD3SmypwASoj3ZulY4GMUokSHYwgIgAgHpD06+SRe31v8A9aBrqfT09aoEwd5hfIfJPK2m8U57JmHWVSrOYL3HPOUTOtSSdX5PRFWTTdNMMY3aPGTeqxmQmPtXuJRds+V//UBFv1mEQN6E33xz1zV9Rm7bBi5py5cOJhgl19zBaQ2OTMQTIYTxugKj9VmsB48/6L5E7Fsu2Q9TyMrDXDizJVOcEbhmrH5GNCCAiyjlZyGP6brc3v6FYOAmbJJoxMFGO5N+4OBE2rRE65ygY2wnVEoCCSRPhMYSh9PWjtRp9p2DOj1mmglyM2RgAqKSR+Zt6AfiSBW79xutXocF9jt548fDjFyzsF/0F/Un6AXP5VIan8cHj5VRXIM0egsHT1OKrxHybIZSekFROHdItVHXSiP2P1SG9c4CAhuHbrd3WPgfKy5Gk7tlnTYckoixhII/dyJTfwqF7L6eFP3Ne48VpLs3znj4kYj6Xijc5kcJlyShk9rHjFvLOFufXyw+1SLHyaZXIVQGiW+bqoyKEr7JdmbeMQTVRESmIRUqa6ShC906TSOXvClE5AN9SYQ1qXu/V/8Ah3Z8vrf7hMkY0vH3FBX1HIBgR4cAjkASL+hIravSezL2/rWL2H2HxjkxhuDEEetiVIPlCQeJIBt6gGp44LyW7l6JEskq+0CIqcI+i5p6R8yZKDLtCitFR7GPOZMqgTTRUvriJQ74TB2jvr2V8O9+ydp0/GxY8KP+263EkhnkEiRn3kHKGOOM2De+hH3Ege5yHk3rx/8ALvRMbW9uyMp82T+47HLSWBDHJIPZc8ZpJJACQYHU/aLnhY2HisdjR0btcKfKNsWFo8DTXSqkuwnQI3QmW0siZ6mWMjmhVGbo7SRATrCp0j1mKPwbB0YnUYu2do1ewx+ujUaXVSEzR5FlWdJlLgRRJeNykt2flb7iD+Vd+X2yXqnWNnr8jsJ2242iAQyY5LNA0LBCZZXs6h4rKnG/2gj86eu1OpeVbW2rx7Cyw5kqwL2Pt0SVvuo9MInLHQ3Wco+1SFR6QA2xR6/T9HbPYsjabGDZ9ewoc/FZdf7kebDx8yHz7UFyP6wAt5sPNao69j63XTa7sGbNgZQbP9uTDm5eEH/uT2B/om9/Fz9tbmlO3z6pV1zJM5VhIGimibxrOd37YKu3TBudWS7oRT8W5FLvTbfCfUr1PKy8zrWDkZ8WTDmnGQOmRb3wyjiTLx8c2I5m341GdqxsTE7Hm4+DLjTYYyHKPj39gqx5ARcvPBb8Rf8ACmrzVh1O+112nVo+vsLS8mY+TfSbtAGy0iRsRRucHL9JNVUTpoH7AEpgMAdP0BDXPyv8XR9z0cidegwoewy5UUskrrxMoQFTzkAJuFPi4INrePWth/Fnya/Tt3HJ2CbNm0EWLLFHEjchEXIYcIyQLFh5sQRe/wCIpo4y7v8AD8i9plrioOYuqsSwSiZ
epRhV51CNSYPQJMWF08Sbpu/Z7FFMU0Eu0yaYlMYOzWs9f27M+L9hL1bsmPh5XbHxoxDNhxcslYljcCbJdwof2o1XjGluSqQxFbIzup4fybgxdo67kZeL1ZMmQzQ5kvHHMpkQmHHRGYp7kjMGkf0ZgVB+jDTk4fIFYcmUqsa5k0rOm4Wv8LAN42RmmQN1DvQmWLY6yzZc4Ld+ZUoClsn0m7e3Wm9vuH7r16QvroJNgmwDNsYMZYpZ4+JMnvxqWZCeXuFwCn22PmtwanUp0zfx8NjPHgPryo10+Q0scEhYBPYkYBWUcfbCEhvuBHitJjOBpCzaxWq7SDVaOrKAOoytN5dswnLFIN1UXCTZFq7bKpOo1ygBiqdpRE3Zt6dRPQdN1GaHO7H2yeN8HXpzixVnWPIyZFYMqBHUq8TqCreRc+Klu+bftkc+D17qsMi52e/CXKaFpIMaNlZSxdGBSVWsV8EAefwpyrvQ156Vl5zGFGkoOKRqcPOyaSDkkek0WmGaz54mxFwBTOEWrMwd4k2P0D0iGwb6v3bunT7rZZO3+PtRkYetTWw5EoVhEEadGkcR8rFgqEckibibEWF7VROqdwi1GuxtT37bwZewfZTY8RZTKXWFxGhfjcKXcHi8ilhcG5NqkrhFtT6RS6gWCczk0bJDlFZV2m1Vcs2sy3ZCR4msRMgEh2SCiZietvuYN9x9Ot9fEcHWOo9T1i6d8zLO9kVi4RnRZ1SzhgPECKQR5vcj1NaK+V8js3bO07JtumJijRxlQhcK7QM90IJN5nYEN4tYG1h6VI3Yd9th3D0ht2h+2HpDW9eLA2sbitHXFr3FjWhmrNA14jY8zJt2Xi3jRg3IcxlFlHT1QUmpAbpAdYCKqAIAcSgQPhHUPtt9p9GqNtciOH3JUjUE3YvIeKDit2sx8ciOP51MarQ7jds66uB5faieRiPChIxdzyNluB54g3/AV9vGk4eciXbSWQawbVB8nLxB2RFXEk4WKAMlknoj1NStTdpih9XrnJxdu+4xsnGyUj1EaSCaExhmlYj+mwk9UCH1H1r5xcnUpqcjGycZ5NtI8ZhmDkLEo/Wpj9HLjwCf01u9/pjqWFxUV/GtW5hYd5JR807jGTiXiSOE4uSWQIo8j03RRI5I0XEOtEq5REDAA9oaj59Tq8nYQ7XIx4X2eMGEUrKC8YcWcI3qAw8G3rWfDtdljYE2rx8iVNbklTLEGIjkKG6l19CVPkfga1oyNlC3pxZYBAakMKZ4ezC/IDgkyCwFJFhHb9ZkzI+v3u23wajmzt9/yhdcMJD1r9oXOV7g5CcNYQ+162t5528VIDC0P/GjsDmP/wAkGUEGL7Z4mC1zL7vpfl44X/OuYOeeS0jY2LmvSkMjBSRGLSQkO78NYETpd4L+M6B6hbEH1R6u3fX3qdxlbLOzsTIwcjEjw5xHHJJbhkqRf3IreeH08187XUYuuwsHLgzcfKly4DI8cd+eOwNvblv45fXxX1ZpSVjol+pXI9nPWRFsVxHV9eRQYHfB3pCHEyhzdaSRCCI9W3SIhtvrnsGx2ODrZn0UEWbvkQNFjNKsZk+4A3JNwoFzyta4tevnQ6/X5uxhTeTy4eiaTjLkLG0gj8EjwBYkmwte9je1btqddRs2UdIg3cqN0TuW5TgqVBwdIplkCqBsChUlREoG+HbfUvjtK+PG+QoSdkUsoNwrEAst/rxNxf62vUTkLEk8iQMXgV2CsRYsoJCtb6XFjb6eld+u6uqjSlGlKNKV1x8xNVx3IHjYxrKNZRw0cLFVeg0XbnQbg3VAoGSOVUqhCFEB7Nh311ye9dfa4+vm/wCH5fnXZH7X3e7y9PFvx/P8qUXv/Nf/AGon/wB9I/5jXZXXXUe8yqh0lT1JMx0RMKRvbaXqCYNhHYENh3DXW8UbusjC7p6flXYkskaNGhsjjz+ddvv/ADP/ANqJ/wDfSP8AmNdlddHv/Nf/AGon/wB9I/5jSlHv/Nf/AGon/wB9I/5jSlHv/M//AGon/wB9I/5jSlLWPnLY6ZoOEKsxMkoTqIb3hRAdt9h6g8COw7hpSs32pcv/ALUY/dEl/wDuGlKPaly/+1GP3RJf/uGlK61pC7LJKIpVqMbqKkOmRdafKqkiY5RKCp0kmIKKFII79ICAjpStkzjPY1XTiu978WMQo3Mtt094cjY/WcC9vSUxxEQD4A0pURfLa/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP8A0us+lKtZ0pRpSjSlGlKNKVBmK/WVXn+4/i/+vfLOlKnNpSjSlGlKNKUaUo0pVfnmC8iZHCOPKzAY+tWOa5nzKFhRrGDWWVEJtWm2S2IqNXsnDyCsGiddqoaAKuZFRQ6SXfimAm7dtQe+2T6/FVMZ4k2UzcYRJfgzeCQSPT7b2Ppe1bT+Jej4vct9Lkb3F2GR0nWQGfZNhGMTwwEMqOokIDD3eIYAE8QxA8U0/Am4UbIfG2rZBplCxnjR5eZOyWK9VPFjtm7rxL4rMvI+wzbg7dFs4LJT7qO8ScHCZVgA4FETAUDD89ayMfK1KZWPFDCZWZnWO3Hnchj4A8sRfz581kfN+q3Wg+QcnQ7fO2OxiwooYsafNVll/bCNXijAJI4RK/AFDxuCfF7VXN582Xseu+JMriKLyTWl8mRWVcXSdjx0xsrUbayrbxGdXbu5evIuQekh3CgJKAKiYpmECG2+pHVT+Ss7FOjbBSZDmLNGWjDfcF8kEr62/jW//wDCTqu9i+Uou05WuyF67JrMxIspoW9hplMYKpKRx9wC4sDf1F/UV5DqJXoe3XapVWwWiNo8FZLFEwcvcppBZzD1WPk3aTRzPyrduILrR0WmoKqxSesKZR21o3GhjnyEgldY43cAufRQTbkbfQetfqnu8/L1WnytngY0mbm4+O8kePGQskzIpYRIT4DuRZSfFyK9d3lpcj+PvHLBtuwHMcomOX5ys5yeY8ol3GCtDeiWCy2iq+NqFPpEo/dTB3FcFavrmB0czRmUVekgBuBh3l1HbavVa6TWvmCeRMkoj2bgWZbqiEk/b9p8+BX5W/5D/H/e+/dzxe8YnWn1WJk6VcrJx/chOTFDDNxnnyEUR2ltKg4APIQtz+Ai7B+aV5gDCsoZEcxuLb3C2VXKSsrX4XFt1k3mJpzHDwIZGiPI6CdBNpLyiaqEkV0+3RWTWMQigdBgCHj7l2hIRlEQSRP7twI3JiMZtwIH3ebhrn1vYHxWyc3/ABr+DJ9kdBHJssLMxxhBJZMzHRc6PLXmclXkHtkIQ0RSP7lKglTyFWq23zHcHowdUpitFsuXc72TCFdzjGcf4qrIIWaZVVTZO1IFpF2hNyaHt7ZuZeVasnCJnJY5uKwD6BG55Ha9cI0x/befaPjLMIAtmP142a9m9WAIvxF68zar/H/uT5mTt0zcfVdJx9zLrn2bzEwxgFlErPDb3ICeMLyKwUytwP1FbLh1n26crco3zJmNj0qm8W6iqrR06dERDFzZMgZWkI2KnJm0TUygyjDwkhUlnwxzlECLpPVC94RUwdofeh2eRusyXMxfbTSoeH
AAFnlIBZiQBYrexHm5+tY/yx0bUfGPW8LrnYRmZfyVlAZJnd2EOLhK7xxwxxln9xZwvuo11aMHiVFSmqHJfHGS6Nly84VeyeVVsMTV4p1jrlZj5FhOvchUWOO9k6HEElmbQHcw7W7tu3UTBRuoqqXYxg31MY+3xMzHnycAtOcdnRlUEEuguUFwPPpb1F/rWtNt8d9g67utVpu4JHrE28OPPFLMytGuLktxTJkKM3FFF2YGzAA3AqJVKzBw9x5kfFtwvfHxrxZzXl/Gs3kh5a73RIqtDSxtNhVjZqk3rJhE2kawvVpmESmBibZV4Khdu02wwsGdosXKhnycUYWwnhaTk6BeHJrFXk8AOx/l+tbR2/VPlffdf2ep0m+bsvUNTsY8RYcbJeb9x7MQePIx8Q8nbGhjJHuDwlj9BVkblCJm05CFdpRM2j0lby0M6TZSqXQsQFSN5SMWBwQCLJ7HAiyexi7CACGrWRHKDG1m/EGx/wBCD/2NefY3ysNo8uIyQv6pIOSG48XRxbyD4up8HxWWkik2STboIpN0G6ZEUW6CREEUEkigRNFFFMpE0UkiFApSlAAKAbAG2voAAWHoK6WdpGMjks7G5JNySfJJJ8kn1ua+9c1xRpSjSlfRSmOPSUomMPwFARHs7R7A3HsDShIHk186Uo0pRpSjSlGlKNKUaUo0pRpSjSlfQGMHwiHwekfR6P8AEIa5ua4IB9RVbfLGHrMNd4cIOORjZKQhTyU4m1bpt2bk675cjN4BUylA7tXu1QVN8IFLrwf/AJI6rr+s7fijUwLBnzYhlyAihUctIwR7DwXPFg5/Ja9z/wCOOz32z6plf3Wdp8CHKEeOXYs6BY1Lpcm4QXUqPzaor+nXngC1eh7AHlXIdmuLeb18seXpSmqdUkLjY4Sss0Vu8nnKLYVfCqOE0mCyoIvH6iIAHfMmyImMoP1GwCAjqd6z13P7Pv8AE0OIsgmy5lUMEZuMZazS2HqkYuSQbCx8ioDsnYMHrWhy97lshhxYmJUsBycC6xXPo7mwCkXNx4p8g4CcZYuQxtidvWGTZCgO3WUKq3Jj6ve5tfmWz94r7ZgigRNnV58qjlwJlUupVXvjHEfgD1dP8OaOLtMXXpN1uv77kYTzZM7RgwZGOjhfakkLAJ4S3Alrix8eleWIPl7dy9Xl7BHptMNHj5qQ40AciaDIZS3uxx8SZLl78wFsbr59alYuwq+PbuwdY9YUdmtIQj/3pgWcgVKyzLFm2F/FNqzGpufBis8WQMY5xL63YO+2r7Lhdd6R26DI6RDqIpp8ST93jxyhcqeNE9yFcSINw5OyksxHnwfQVRIs3f8Adepz4/dpdtLHDlx/tMh4+WLDI7e3M2VKV58UVgAL+PItet7alqnfqzTCWuLnmK8xYIZZkxjOhxPVmaEx1WwSa7MF/ZiXQmIKKCAAJB7NtTHY5Otdz6/ql7Hj5kMuVmwNHHFZsjFn8lfdZA3tCwPNiALGojrsXY+nb7aN13Iw5osbCnWSSW64+VB4De0rlfda5+xQT5FN5VabUc2Psj2G3xAmftbI/pSHhHB0E2zWDEqbOVah0mMnMnL++qiJiqegS7bhqj9c6t1n5ay97uu0Yt82PPkwF4MVCpj+EmT6icj9b3Ib0K2q69i7R2X4pxNHpes5NsOTBjzm5qGLNP5eFjexgH8qWBX1DXrYSfGWuNqlIQFRmJiOcO5JpNODyK6T4ku7h27j2PGvulNqmhHovFu8ExSGOAiI9vZtnbD4A0WP1mfSdZysrHmlnSdjKwkE7wq3sRSWCBYxI3IsFLD18+BWFgfPW9yOyQ7nsmNjTxRQPAoiUxmFJmX35Y/LlpGjXiAxCn08Xp96ehZW1YhG9xcNHdmRZFTmHLHbwarkpzgBm/SmkXo7npDsKHaGtx9Xi38HXsSDtEkUu/SICdo/0FwT+mwAta3oB5v4rUHZpdFNv8ubrKSR6B5SYVk/WEIHhrkm97+pPik7cMdjapVhPMLbaKrNxzL2c1cwjwpWhmqroXDkjiPVKCSy7gphT7zqASl27B21B9o6Oex7GHc4Wy2Gu20EXto2PJ9hQvzYNERZmbyvK/hf4VOdY7sOva6bUZmtwNhqp5fcdZ0JcOE4qVkHlVXw3ECxP4XratL/AEx0VwAWSPTKxn/dJdR4oZn12NMAA0Wl4oqXiXimwiAJ9QGH0DqSxe6dUyA4GfCohzf2TFyUvlAW9leYHJzbwFuD9DUdk9N7TAUvgzM0uH+8UIA9sY/+6eBPFB9S1iPqKwUi3G0xFyiJdovQXIvnUXWpyJfJP36sYHQKE+kQDADVyYwCHdGEBKGsNB2nsOs2mr2cbaacyvDi5EMgkkaLxxyQLjgxNxxNrVlues9f2er2WtkXcQCFJcrHmjMcYl88sc+DzT68x60pGlfZEYNG0ki3m3qMUhFvZiQZNjyEkmVoDRws7X6DKbvSiYTl6un1xDU7i6TFXCigz1TLykx1ieeWNTLKAnBmdrE/1PPIXt5IHioPJ3WU2ZJkYDviYz5DSpDE7COI8+aqi3A/pmwU2v4Bqv6xr13DEjTZ2sVSxV2eeKXIkqytB0Hkg6glQPEtARKKaTBJiquY5kygl1CkGwiO4a8V72fR/FWdqtz1/W52DuJWzhNHllXkfHN4U4ggRiMsSVHC5QWJN69l6ODd/KODtNRv9jg52oiGEYXxAyRpkC0zkkEyGQLYOS9ufkAUzmMI6Ns1yZ1SUZJrJXQDwIPtii4hH71cHKU5GpeqmC7Y6XSCYj0mTOYN+3WrPj7D1+/7TF1zYwq8W2vj+5a7Y8rkOMiIeByQrYKfBUkX81s7v2Zn6Dq8vYtdKySaq2R7fos8aKVOPKfJ4uDcsBcMqmpxw0TOypjUrHOWGKteo6bOtWqDmYRV5MKqFWclmyqvl0ynBGRSOZJMyfURLp6QN2a9e6rW7jYk9T6N2SE6TULHiZePPAZJ2IZ/fBkYA2kUsilLqtrBvFeTNpsNRrwO1d365Ku62zPlYk8E4SEAqvsERqbXjIDsGsz3uR5pZVfDqcTjZ9jiXmXXgncy9kEn1bVXh3jZms+I8ZtU3Corq9aIJgRQw7gcOz0atPXvi5NZ0OboezypP2cmVJIHxWaB0QyB0QMeTXW3Fj/ML1V+wfJzbLvUXeNbjR/u48ZIymSFmR3EZR3KjiLG/JR9DXXeaOzZY5hqlGXhxVXEXMxJq/YrBKqqrLyRHplkWb9cDIqSRnIKGKRH0CIF3AQDbXX2/qOLidExetYG3fXT4+VCcbJyZiWaUSFljka6mUvchU9CbeCBauzqXbMnK7vldjz9SmxgyMWYZONjQgBYjHxaSNbMIuFgWf1HmxBNL9jTYsksytMw2aydySgmsG/m+6Omk5TQMRZQ6MeY5m7brdlE5RAvWT0AOrridV167GLsO1jjyO1LhpjyZFiA4UhiRGSUW7jkLC49L1TMvs+wbXS9f1ryQdXbMeeOC4JQtdQDIAGayHibmx9bUsO0ewNxH/GOrR5J/Oq14Hr6VxrilGlK5323/Z/1du2n/alJZhbo6RtU5
UEG8oSSrzRg9euXDE6UYslIAUUSsXomEjlUgG9coB6uq7hdnwc7sWZ1mJMgZ+FFG7s0ZWJhJ6e3IfDsL/cPpVgzOtZ2D1/E7LK+OcLNlkRFWQGVTHflzT1VTb7T5v8A613mqsCe0o3QzEBsqESpBpSPfLAJYtVTvjtu4A4Nx3U7eoS9X09dzdc0z9hXtbQ33yYxxxLybxETyK8b8fX62vXSvYtwnX26ssv/AOgnyBOY+K+ZQOIblbl6fS9vyrmdtUDW14RtNvysVrHKJwsMQyS6vjZJUoGTbF7lM4JCID9UcQL9PXO37FpdDNiQbecQy52QIIBZj7kp8hPANifxNh+dcajr243sWXNqoTLFg45nnIKjhEPVzci/8FufyrHgvfH2rZwsgQoQ4SSYVEYwVRejF92fvhme8ESA77zp6ejYNt9dGm/5R/cdgN6MQar3x+y9q/uGKx5e/fxzva1vFr137f8A4x/btedH+6/uhgP7z3be37txx9m3nha97+b2pUasVV+jSlJRncI17b5ilJNpQkpCRrOUdOlmRk4lZu9FMEk2j/qEjhyTvA6yAAdPbquYvaNfl9myuqRR5A2OJAkruYyISslrBJL2Zhf7lt4/GrFk9ZzsXrWN2qSTHOvyp3iRFcGYMl7l47XVTb7TfzW9lPaXsyQ9jA2GY8G59lg96vBjId0bwYO+j1/D9/t17dvTqX2P779hOdX7f9y9l/a534e5xPDnbzw5W5W+lRGv/Zfv4f7n7n9t91fd4W5+3yHPhfxy434/nWqiHsrH1dm/vTiHYSzVgC1idMlTIwbZwU5gUUQWcG3TbAUS9px9I6jdXl7HC69Fm9xkxYdlHCGyXjbjjq1/JVmPhQLeSfW9SOzxdfm9glw+opkza6Sa2MjjlOy28BlUeXJv4H0rfpKprpJroqEWRWTIqkqmYDpqpKFA6aiZgEQMQ5DAICHYIDqbjkjmjWWJg0TqCpBuCCLggj1BHkVDSRvFI0UoKyqSCD4IINiCPoQfBFdmvuvijSlGlKNKU9tTERgme4FDsUAOnf0AcQ3NuI+tuHb8GlKUelKNKUaUrDkf93vv9jc/xJ9KVC/y2v7BvFL8TNS/kptKUwXkd/qneEH4oj/0us+lKtZ0pXG/7P8AF/5dKUb/ALP2dgaUrnSlGlKgzFfrKrz/AHH8X/175Z0pU5tKUaUo0pRpSjSlGlKp88wjMjpoxfp4EqeGc/cpcJT0FkmIwfbEkrRkGJqBFCxs5Z6ZVmDlKaZ2EGTtMUVi7ACRjDsY3SUat2POMcXHWR4+VucdlkELfc4X0ZlUfdysfB/C9b6+F+rxZey9zvGVt9F8a7iGXDfYwkw4rzgco4Z5mBjaLkp5L/5WFwLkQ18sSE5KYgtN9dZ8473SCneYeRZLLz23QSFeY0bG7BhWjOmMJaqy0Fs/qMkuu+MyI3FEpjrJ79PYIjCdPj2+DNIdniyLLnymUsLBIwF8BlFipubAW9RWzv8AI7M+PO163Bj6Rv8ADmwup69MFYJDK2Rls0tmkhma6zoAvuFr2Cm1/QVFz/mFK7jSDomJ7NW4Cis8n3vJLqMyRYo9nEHvktA1moFdVdnPO0zGmEo1ksuIogcEyHDYNzAAahvlGHDjxoJoljGXJMQ7eOZVVuoY+tv+9bK/wP2HYc3d7PW7CfNbrmFrw+JEzP8AtkkmntM0Sn+mXYD7rEkevjzXliImZU5EkyGOoqcqaaZAExznUMBCEIUAETHOYQAADtER1pryfT1r9LSwUc2NlAuSfw+t/wD61ajwB4qZ75LZDyRxOPKfFdUGrKg5eyiztMMcxmB6pNIDVgTjmq7SXbSVtj5Zy0ByiYQ7kwArsXt1c+s6XZ7bKm0hb2YAElkDD04n7fA8gsCRcfT1rzP85/JvSPjzQ6/5QEX9y2rPk4OG0Mn6hNGfeu7BkKQuiScW88h9tzV2WcsF5R8u2rXTMOFuQVylrlkvI0AhlPJE/jOAyAXDGC4phJK16Ie1MZB0uauR0kds3RfCiZ27Mmmn1AImE2wdjrczq0Mmdr8qRp5pV9yQxq/tQgHiCtz9oNgGtc+leO+md06389bLD6p3DRYkWo12vlOHiRZcuL/cNi7J7sizcAPddQ7NHyCICzW8C1QeMuQOYcdTN7vdnoEBGXrkBPXeXqXNjkXV7YwlcdVuZZz1KarUdxDwsnK1R/LvXagNW5zLptAIRNJHuQMoajYm0z8SSXJmjRcnKZyuXOrAopBQcLAlST6DyB6AWr1V2Po3VOwYeFpdbnTyaXRQY8c/XtVNCyZU0bR5DDIEkiJOqKo5sApe5Zm52WrJcHchMO+VLgahWSizuXuZOKuUVqllY6yVpnF1eq0vLNQQSiLbX4dGbZtZJ8/uU4dQUQUIkYyTIDAUe0Rtut2uB0vWRS4zZGfh5jmzKAqpKvh1Fxclz/DwK8+dy6J2z/Jvu2dr91Bqupdm63jIHimZ5ZsjBnJeCWQxsUVceO3KxYcpLEjxVr9fz9jvBlxXNnLMuM8fRfIiSG24cxoNTi6nI4+Ugqqwk8kV3I1tgEFIWStsa9epi7cSKySwKF7sDGEQKF1i2eLrp77GeGKPLPKKPiFKcVBdZGH2lgT5LEfhXmLP6Pvu56lR0zU7HPytBH7Gfl++8yZXuzMmJLiQSESJA6qeCRKy28kD1qF3KLmjh+14IzYxztY8O5Cwtnx3M1bg+lSYK1T1hss/X2x4KRsV89os2rCAkaPenzdZJ6kdMUkd1USqiAANf3HYMGbW5C7NseXX5JK4nAMzMVFiz3AClHINx6DyK298b/EPbNZ3XTTdJx9vgdv0aRzdhORJDHFFFK3upFjcGZpUyMZWUxsDdvtcqKqgv2PeWmAaXT+J2SnljwvH5hutThsmczqq/sV+o2UHc4kzJh5m+uyTuOlYVOmOFlmz1RmsgZ63VBVwCgJlAaVlYu81mOmkyy+OuRIoky1LOkhP+0C/gjh5BIIuDc3tXp3R774u7zt8v5P68mPt59VhzyYmgmWLGycNYyxz2XHKuknvgK8YdWEbLxS3K9eu3GlZk6VjjH1Nm5v3mmalSatWpaydS5veCSg4RlGvZvqdHUdGCUctjL7qGMoPXuYRHcdbyxIXx8WLHkbnIkaqW/8AIgWv/r61+V/Ytjj7jsGdtsOH9viZWZNKkXj+kkkjOsfiw+wEL4AHjx4pa6yKh6NKUaUpos64agM/4zmcWWex3qqQ04+gn7mcxvaHtNtzZWvzDOaapMLBH/6U2bO3DIqbkgdiyBjEH06xM7DTPxmxZWkRGIN0Yq3g38Efj9fyqw9W7Jl9S3cW9wocXIyYkkUJkRLNERIhQlo28EgMSp+jAGnaSTBFFFAonMVBFFAplDCdQ5UUyJAdU5tzKKmAm5jD2mHcR1lgACwqvk3Yt9SSf+v4fgPyr70rijSlGlK5ABMIAHpEQAA+iI+jSlMjgTkZhjk9T5e/YMuqF7qUFc7Dj6Wl28VNRBWlvqh2yc9Di1no6MeKGYmeJD3xEzIKlOAkOYO3WFgbDD2cJnwn5wq5UmxH3D1HkA/6
+lWbtXTux9J2Meq7PjHFz5cZJ1XnG94pL8GvGzKL8SCpPIEWYA09us2qzRpSsV8+YxTFxKyr5jFRbMCmeSco8bR0a0KYwEKZ0/eqoNGxTHMAAJzlARHbXDFVUs5AQepPgV2RRSzyiCBHkmb0VAWY/wAFW5P+grzBeZ356zLjFyXwlHcU8oUPPFIqldvhOQeLIQrB5XZ62LKJMabHOsnIxD97ErxHfqOViQ66gdbbulwAT7BrLsvd11uygGrlSeFFf3UFiC3oo52Nrev2/hY17h+D/wDFiXvHStnN33BytVtMiaA6/KcsJEiF2lYYxdQ4ewUGZR4bkvpVpfADzOsU8+MVlkaU5qcZnyOpbewWjCTaxLrSEbKHaio/QjTy7KNkJCBiXp0kFnwEMiB1QATBp175Azux4uwgxsD29/iIzY8TSrxyrKxVkb1VQQofnYjlf081qL5b+ANh8S77F/uuRNL0nJyRGc32f9peQFnCFl9xl5MiKfIU+CabO2S1wyVdJKRk2SjqeXcEZOGcY2XWasCtDAyI3RTQF13TZM5PqgEQOYRNuIj2+COy7LtHfu1z52wiMm4dxGyRKzJHwPthVC87KCD5ubm5uSa9Xdc1vWuidWgwcCZY9OiGRXlZVeTn95Zi3G7EH0sCAAPFqw5miTkHHx0m/wDCkayzKQeR+yi3iFvZTsrOQamaigC6DpA5gOIKAUnd+t1fBrF2fTdxp8ODYZvtjGyYpZI/Lc29lwkicOPJXUm55ADj55fSsrW9v1G2zJ9fh+4cjHljSS4XivuoXjcPy4sjAWHG55eOP1pHJmIQxBUTKqTrTMcgmMXrIU4GOmBijuUFS9giHaAD2arCMhILKGTkLi5FwD5Fx6XHg/X8KtLpI1+DFGsQDYGxI8Gx9bHyB6fjVkFD5C4xkI2LbFgn0TPR0SyZjHRsED8ySYHI2OyiHqIi9eN0A6TqCYpNwETCAj6fd3TPm/4/zsHHx1w5cbcwYyJ7UWP7nEXCGOFx97qvhmJC3F2IJrwx3D4V79g52RO2XFk6abJd/dlyPbubFg8yN9iM3lVALW8AWFbNYi2cWDt8/cT2OIJg/dNKzIlk0491a4ZbqazicxBuVUjNw+x90HUO5Sn6gEQHWfKsvy7hyZmXJm6LTwzsmJL7ojfMgN1yBPjuwt6FPJ8A8hesGJovibLjxMRMTebeaFXyo/bMiYk4s2OYchQeXryNh5IsbWrZzcHVG1YuCVHpUc5s+PYIYiNcTUW8ORdqMeooLaOlAUB7JpnjFFSFMmp1dZgADbCA6z9vqOt4/X9pF1DUwSdh0mH7MTZETkMntE8I5r+5KpiLqCrX5EC/m9YGp23Y599rJO27WaPr+6zPekWCVAVb3AOckVvbiIlCMQy24i9vFafAc7RHb6UjqdW14aSeVmvz1kdBJnetTvthZmjiNV3bpdksxVObcOzsEN+3Ub8M7jp+TmZOF1jAfEzpdfjZGU/ul0L/AKDEFZ3ZGjJNx48Gx82qS+YtR2/GxMfN7NnplYUWfkY+KntCNxH4f3S6oiusgAt+Y8eKkig1bNgUBs2btgWUMssDdBJAFVj/AFayoJFKCip/hMO5h+jre0UEGOCIESMMxY8VC3Y+rGwFyfqTc1o2WeecqZ3dyqhRyYtZR6KLk2A+gHiu8AEfR2/tduu8Ak+PWukkCuQ9IAI7bj2/DsHwj/gDQefFcn0/OkXEKWKxsbTHWeBNW0heyMRFrsZIjhzIwizcUUplNVPqFg5U7w3SUe0hgAdtVXWybzfYmxwuwYZwIjLLBC0coZ5cdl4icEf7bm5sPUGxqz7GPSaPL1+docwZ0oiimlWSIqsU4bkYCD/uKLC5HggkVoXeMWDaht6hBHa+0YbZ5XJ+ys0Jpwxnk1jKoTj0BRKD58j3higcxRNsIfQ1DZHQMKDpsfWdMY/32L9+LkZSLO8eQCSuRJdR7ki3IDEXtUzj99zJu4P2XbiT9llfZk4+M7QLJjlQrQJZj7cbWB4ggXvSnrU4Z4u9r7xR28nqy3jG09JKRijGOk37poVVRzGHMAJLIqnARMUgB0CO2p/Q7g5csujyWll3GvSJMiUxGOKWRkBLxH0YE3JC+FvaoDe6kYsMW6xhFFp895Wx4hKJJIo1ewSUDyrAWALfq9aQUpUYyuO07rdL/Z12kFanc7GkXcnbxscSXFFojAuWrUix38YgoUoJAYCiAiIj8Oqbsesa/Q5K9s7Xutg+Nh7F8iIM3GKMTcUXHZEDGSJTbiDaxJJ+tXDX9kz95jN1Xq2mwEyMvXpjylVDSyGG7tkK7FRHKwvyIvcC1R65CYiyddbsayQUOlNQxotq1jyspFEi7du1SFQwuGr5ZAE1nCihhDueoD7Bv260l82fGfyB2vtv991GMmXqjjokftygMqqL/ckjLZmJJ+y/LxfzW6fhf5K6D1Xqo0e4yXxdoMh3k5xkqzMbDi8Ya6qAB99rebeK2fHnDEtAPIfIM6n7Fcshn2ElBT8eJHgF3TJGykcqsUpY8Cl36lB9Ywb9IgAjrP8AhH4q2Wny8Xu24H7SeL9xHLj5EVnt4EcsZYARWF7sfJF7EA1gfNXylrtzi5PTNU37qCUY8kU+PJdL+TLFKFP9X6WUeAbXBIqYbGFhGL2QlY2Mj2r+Y7o0nIM0Ekl5IEwN3J3KyYfZxIBhEDDvuI7/AA69Q4up1OJlz7LBx4I83KsZZEUBpbfpLMP1Wv4Pn1rzNlbXa5eLDrs3Imkw8W4ijdiVivbkFU/pvbyPH4Ujay3q2PnLCgIz0m8l7AvMWCObTbpxIyDlPvAUfAk77gEkmjXp9RMxg2DfbfVX0EPXelZEPTI8zIl2ea8+TEuQ7SyML3ks/GwRP5VJFh6Xqzb6bsPc8ebuUmHjxazCSHHlaBVijU2tHdOV2d/5mANz62rY3qHlZqPikIiJrMuu2sEW+cIWlNVVo3ZNlRM4eMASARLLNyjuiI9gDrN7hrNjtcLGh1mNr8qWPNikZcsEoqKfueMD/wB5R/tn8b1g9R2eu1ebkTbLJz8aKTCmjVsQgOzsLKkl/WFj4cetqW4+kf8AH2+kQ1bj639RVUHpSbtlZaXCAfV188k49q/FuKjuHdixkU/DOEnJO4dFKYUgOdIAN2DuURDUD2Xr+N2jSzaTMlyIMebjd4H9uUcWDji4va5UA+PIJFTnXN7k9a3EW6xIseaeHlZJk9yM8lKnkhIvYG4/AgGulu7sDayIQRIMp6m3r6Jy2dSQTO6PKoGIgSNUY/vphMgTrMt6BHXVDk7uDfJqFwwetphA/uzIC5mUhRE0fqSVHIv6E13TY2mn0b7dssjsT5pBxRGQvstdjKJPQWY8Qnrb+FKnViqvV1rGUIisdJPvVipKmSSE3QCqpSGMmmJh7CgocALv8G+viRnWJmjHKUKSova5A8C/0ufF/pX3EqPKqyHjGWAJ9bC/k2+th5t+VaSrv56Tg2T2zQqddmlu+8ZDpPCP02nQsciPS7T9RXvUSlP9LfbUT17M3GfqIsvf4i4O1flzgDiQJZiF+8eDyUBvyvapbsG
HqMDbS4uiy2zdWvHhMUMZe6gt9h8izXH52vW/1M1DV0rNmzgUjOG7dczdQFm5lkEljN1gDYFkDKEMKKpfgMXYddUsEExUzIjlG5KWUHi34rf0P5ixrtinngDCF3QOtm4sV5D8GsfI/I3Fd2u2uqjSlGlKNKUaUpA5AgKpY2sCxt0qaOZFn2arJoMiRghOSBd+5h3aRwEsi3c7+sh/lapndNL1veQYeH2bJMOJ+9Ro090RrkSfywOD4lV/rH9fXxVx6buOxaSfLy+t4wmyjhuHf2jI0EfjlMhHmJ0+kn0pdppJN000EUyIooEIiiimUCJpJJFAiaaZA2ApEyFAAD4ADVwjjjhjWGJQkSKFVQLBQBYAD6ADwB9KqMkjzSNNKxaVySWJuST5JJ+pJ8k/jX2IiBTbBuIAIlD0dRgARKXf4Nx19/Q28m1fItcA+l6TlVkLDJxBXdogU63Ki7eJGi0nxJEhWyK5iM3IOUw6RF0iAHEvpKI7aguuZ272GsGT2DDXA2XuSAxLIJRwViEfkPF3WzW+l7GpvsOFpdfsjjaDMbO13toRKYzGebKC68T5shuAfr60o9TtQdfaYkKchlCicgHKJyAPSJyAICYoGDtKJg7N/g0pT3REjEgxZJNjt2wKlAqTQFiHOQ49Rugw77ioYA3Hf4R0pW/0pRpSjSlYcj/u99/sbn+JPpSoX+W1/YN4pfiZqX8lNpSmC8jv9U7wg/FEf+l1n0pUzs38pMXcfJCCjcgsMrO3NiZun0cbHWDcx5bakRZrFQWLJPcYUe2tIdwJzeok7OiooXcxQEAEdKVC3lzySvVh460bkfxiyrkPFsPWM8YqqFprVywU/q0lkSItmXqDR5yEloTNdNibbARKcVOuRResWiR1zqAZJbZPSla3nheuXGH8xM8zwU5lOK4P40wnG2XLZ8MPcCBbou5RmQpZ3brBMwGWqPaLHaarHY2K1Os1hHce4KmRY6ZjK7AClWFZRaLXjE7iSruZLRhWONFMbiplCps6W4lo2ssmftp6sojkGr2ytoR7qLATODqsROkmAiUxBDfSlRi8uK6ZpvnEyHy7ma9WrKr/ACVZL/kTF7yxV2m125hhCSm3nxPxMuxqEHUa45n5anMm0gdbwzfrUkQIYwAQB0pTfYKyhOZT8xTKcnO4ZyvhdaI4bYujG0VlhpTWkjPoGzflhcZiELS7ldGikWmce7MK6yC3ef8Aw9u3SlWh6Uo0pRpSjSlGlKNKVSBHTMByI5nNcwcQ4rDqdWpUlI0/kTyTJENJLJFrdwL40VLcejxj5COsNfIQUEnSMwJTt1iJ9JBMUmw02F02m8XO0iwe1GSk+RYGRiDYwWNmHpfl6fhXpHZ4eX0P4sm6p8ny7c7DMVcjVakuyYkIlUOm05qWil9WQweGW9zYnwquJ2RL/m/kDyky+yvVpfcd0pODxPj7GVuZvoKUpeRcdlQZ5Elka29QKZtFzzoe9bOwVN4winV0l6dtZekysrY7PMz1kc6oMsSRsLFHj8OeNvQ/Q/X1qE+UNDo+mdG611SXCxk76Y5M3KzIGWRMjFyrtioZVNmeNfDpxHtkWub1A/zY/L+sGcLEncuPXE5jlPLmTGSR8g5md5ScVt3Svc9KOYQERFVCSmWMDIOrBDInQ78UzkR6NzFExgMFZ7t1iTYzfuNZgibOmH3ymTiU4ABQFJCnkPH5fhW7P8YPnTB6ZgHUd87Q+s6rrnIxcBcMSrke+XaR3nSNpEWKQhuNwWvYGwsanLF5T7umuMIYKVuSMpzVyCZnk3JeOXFgjKvTMNYEZu12klZlLU9bKMJ2VK8KmkZRFyCrdyAgmgsmIH1SZektAcfWmS/YZf6kkfIKkUNyC3I+GP8AA+D6A16fwP8AJ+LbJue6JiNH8P4AbDxMoRPNkZ+yZQyQiFWDRpxubMlmXyzofFeqHiPj2gYfi7TiDH2GbjR69hyUYU2Hyfe2sS+ls3xjpEZ1e1x1vaokkrHGA9cGKoK4lSIsId2Qodgbn0mLi4CPgY2PJHDAQgkexMw/VyDerC/4+Pyr80flPfbztmRjdq3u3xM3P20bZD4eMXVNc6n2xC0BPCJ+IBHH7iv6ifWq27d5eOasaUzKEXUeYON6DljljmS6wE9fMox8zOhcsR2tu9d07B1XZ2KRema2SvSa6zoi8cUivQHSl0lIG1Tn6tsMTGmTHz4os7NyHDPIC3OJrlIlDE2ZTc3Fvyr0Fqvnrp/Ytvrsna9U2Gd1fq+px5YsbDaOP2M6Eqs+xmaJVDRSoFQrLcX8tcmlTjviThy1cJeSOKr9mXN12x1S7HKv1pnNzK0MU8P5QwxU1Cyd4pfWdOxWihMbABpRumfrRXQRKiBDCBuruxdHgTdey8LJyMiTEjc+ZQw9qSJfLp/MyBvuH0NrWqN33yl23W/MXX+z6LUabD7BmY6KI9e0LHOw8+ccMfI8GKHJaK0LsLMrMXLC4tr+RGSsYPPK8eY9q2TcU5/yY2xDQH5JVrZKviixu3NwkyDA5Zh4eWatZCvzMw+RILJuCLd7LHEyRDdao7/G1zMNumnFhmhyssQIb8ljb7z4lAIupJ9BYFvQeTXd0Lr/AGOH/JFN7stds9F11trkrwaKbNiUQIfcwndCVljjUn3H5PHCLMRZarrksLZFx75aLGu5+4N2u4Szu4ZMyTd7x7/+KzFQbVaoOKjaHl6RpSYObAlHzMst4aSi1igHh48Tq7qKlMFWfAysXqIi2eteRzJJI78/6qMygJKU8tYnwyn6L58nxv7H7f1/ff5EPsOj9zxcTFXExMTHx/2vHAyYYZHfJwUyPEXKNBzimX+aUKv2qQYeuqTyaPhXjLhrkdjKOiMCV6Za5TpVbnK/J0KTfx09cz1OwR9ut5Y/x0I4vpJsFmqPfFOogVJcpCmAN4FsbbnX4mv28IXWKwkRSChILcWDNa4538eQbWNbXi3Hx0vcOxdt+PtjJL3ieFsPIljlTJRXjxxPE0EHLjIMb2+LtxIDFkJI9LbM54W80XP8XXuIMXCY7xTRcWY1qU1Iv2DF8phq0yFQsxHGLIKnZBmWkrYHM7DV2PZtpdsr6oHTOqO/Vq8bLX9y2aJokWKDGhiUkgH2mKteMJIQWuFADA/gTXlrpfb/APG3o2VP8q5M2fs91s9hPGisy/v4UnhIzJJ8WMpEIpJWkeBx9CF8Wr0GVMtiJVKuS3ljCW1OuQidoLCmVPDlsKca2JMhFHXAFjxwSBVO5E4AYU9t+3Wz4BKIEE9vf4Dlb05W82/K/pXhTaHAOzyTqvc/tZyJDD7lvc9ouTHzt458bcreL3pQa7awaNKUaUo0pRpSjSlGlKNKUAO3aHYP0fh/x6UrTQVbrlXaKx9XrsBWY9w8cSLhhXYaNgmK8i8MBnkgu0i2zRuq/dmKAqrGKKiggAmMOvhI44hxiVVUm9gABf8AHx9aycrMzM6QTZ00s8wUKGkdnYKPRQWJIUfQeg+lbVVZu2SUcO3Ldm1RKKjh27XSbNWyRe06zhwudNFBFMO0xjGAoB6R19kgC58CuhVd2CRqWc+gAJ
JP4ADyT+QryW+YZ/zGWRMNZdd4r4oYnpj+Oo9tttbtOR8lK++1ayOEC9TjGzvGvubOxTdtEg5KqKq6y7hbrAE+gvaOtUdg+Q58PLOLqokKo7Kzv9yvbx9nEjx+NyT9K9//ABB/hxqOx9eXfd9z8lJsrHikix8f+hJj815kZHvRuS9rAKFUW83N683PMzzI+XHN57EBn29LdzW07DGt6vVmS1LrhYmclGksWGla/HOEm80lEuGoFbKPyuHJCDsZQw9utdbjse23LAZz+FuOK/aLE3sQPW1vrc17O+N/hb49+MopG6pijlMY2Mkje9JzRSpdXYEoWB+4JxW/ooqAWwAIgHaAegf2ejUDW3l/OngwvnnLPHy/w2TsRXKSqF0gWUhGR0qz7pYpY2UQO3fxrlm5Iq0esHJD7mRVIcnWUptuooCHTlY6ZkDY8rSBGFiUdka3rbkhDAfj58/WojcaDT77BfX7bHinxHYMVdQw5L5Vhf0YfQix/wCpr2i+XD5gquY8HVeeUt1NrOa757WodqhWUgZnNmmag7SkHUnWolyqk8FR9CqoSCwoFcpIEXMQptyD06hh2fZPjPZbHH6yXhgzoo4zlEEtFGXDgqy+FYMGUswJK+i3INeSflL4q0+020Ue3glydLr3ORHHYGN/cQpxlNjdQftAulyAT4NTok1LA2k5EqU+vIkZIuPaUxHqSDiOFzJJCV0c6zkCfZZUxgKqYSkAxjD6ogA6qufJuoNhOIs151jRvdnjMjRcpR95LPbzMfDmygkn7SBVZwI9PPgQe5hpA8jL7UEgjWXjEfsAVPpDa6C5IAH3XNfNcQoss+dJ3CVmoFNVrHlZPoWMbyHVJGWAkgZ81XdN0it1CGEwKFOXYQ36fg1xoU6hssuVO0ZOXho0cftyQQrLeXlaUyIzqApHnkCLHzxrndS9v1mJG/WMfFzGWSQvHPK0f9LjeMRuqMeQPjiVNx45fWnzxRRWNSzRUlCW2LmmasvLpVt3CiSRRnG7SNWBdVyu0dHTiFUQclAU1QMJhAens7dbg+OOnYnWvlfWumzxsvFfKmGK8FpBkKkTcizI5EJHIXVweRBt4F61L8idvy+y/FuxR9bkYuUuNCcpJ7xGBnlHEIroDMG4GzIQFFr+TUrs8Y8jrvTZB8oyWdWCvMHTiCVTlSxSaZljJeLK8XcD4TwXclFRUFNtwT2Axd99ej/mLpGD2/qs+XJC0u6woXbHImEIBbjzDs32e3xHJuVvC2DC9edPiDumd1PtEOIkqx6bNmRcgGIzEhQeBRV+/nyPFeN/LXKtaknx9tMq+o9ksN4sangI+RShkjyKzdtBRkdEsEmgrxTkvdoFYvO9KAmKYxBMQBAw76rXwp2LZZnUM/edvz2/ZwTrADKVXHiihjCcon8L7b3AuCVuAQTe9WP5n6/rsPtmBpepYK/vJoDORErNkSyzSF+MqG7e4nEmxAaxIIFqV0HijFTtnCS1I6WbZhMFlUZmrSwnNMLNHBznYSskBnB5KOIuI9SIm2AQ27NtWbT/ABv8c5GNibLqNoseHK94T4kxJnZGJMc0v3mWIPe6E+CLfSq3t/kX5Dx8nL13a7yzzYvtNDlw2EKuoAkhisoilK+klrkU3uYMl2mmyiRJpdxCsvfBk5pKFdcIpubZDM2gjJtLKq4UMRvGGkFkUz9BQUDr3AogG+qV8n997F1XYqm1d8XF/ukbYC4zKGzIES8qZRYkLF7jIrcQHHK4UgXq6fGfRevdowC2rRMrK/tjrnNkqxXEmd7RPihQOUojV2W5Knj5YXFOHHTTDMUXOU6bbWCo2KtPIZW0sYp74Y0fImUO9as4+ZS7zxzRRNIO9HpL1AbbV1wNth/KWuy+r7iPN1m8wJYDlxwycDHLcyIkU4J9xCAOfgXBtVKztXmfGOwxezamTC2Wkz4p1xJJk5CSKwjd5ITb23BJ4i5sRenXczERGu4yLeSbFpISxjoRLJ05TTdyZ2xCCsRmicwKOlEymAT9ICIb9utk5G01mFlY+uzMiGPNybrDG7APKVA5BASC5Ate17VrqDWbLNxsjYYmPLJhY1mmdFJSIMTYuwFkBNwL2vSNlavM2qTXJY3QsISDnoyaqZq5IuWUk9FogIuGtj6iHSWaqODdiZdgMX06q2x67tOx7B13kns6nEzIp8M4sjJK/BTyXK8EFC3ootcepq0a7sOs69gK+kj97aZeHLBmDJjWSOPmw4tjeQQwUeWN7H0rJY3YycBNy1kj042Trhnas1AQ7tOfkGbQihzMFDItPsnfyDQAUKmIAIAP0tZGJ2xk0uZs9/AMfYYBcz40DjJljQEmMlU88pE+4La9Y+X1VX3OJrdFMZ8DOCLBkTIceORyAJAGfxxjf7S17VupNk0uVVdMFHEmxj7BFFAV2iikdLNWzpMjgDIHMUVGbwobAO4blHcNSuxw8XtXXJMOR8iLCzsYfchMUyK4DfafJRwPH4j0qLwMvJ6v2GPMjTHlzcLIP2uBJCzISv3D0dD/ABsfUVocfWKsWytEbwZn75hX1S1x0lYWohJA5iCEQKZ+k5KYVVlSpAoBxABMI77APZqG6VvOvdl0Ah05mmw8Jv2rjJQ+7yhAX+oGH3M1g3K3n18Gpjuek3/W98ZtuIYs3NX9yjYzj2+MxLWjK2sFuV4/T0uaUL20V+OnY2svZRs0m5Zk6kY9it1p+IZsh6XKwLGL4dMEhD0GMBjbdgDqbyuxaTB3MGgy8hIttkwvJFG1xySM/c3IjiLelibn6VC4vX9znaiffYuPJJqsaVI5JBY8Xk8qvG5Y3/EAgfU1ny0WxnYx/DyiPio6TaqM3iHeKJ981XL0nTBVMxVCgYo+kogOs3Z6/D3GBNrNgnuYORGUkW5HJW8EXBBH8QRWHrdhmajPh2Wvb282CQOjWB4svobEWNvwIrsZtGcTHtWDUpGsfHNUWrch1B6G7VsmVJIplVTCPSRMoBuYf29feLjYutwo8THAjwoI1RQT4VFFgCT+AHqT/GvjKycrZZsmXOTJmTyM7EDyzsbkgAepP0Art7hqsqi77lsssmQxW7vu0VVSJK9pgQcdJjlTVD09Jtja7Pax5ZFyeEbSqPteylgD/wCLeoB/I2Ndfu5EcbY3KRYmI5JdgCR6cl9CR+YuKyA/YA/s9Ia7q6f4etJqrMbOwjl0bXNM56RPIvlkHjJiDBJGNVV6mLMyICPWq2S9Ux/8rUD1zE3+HhPF2PLizM8zyMrxx+2BETeNCv1KL4LfWp3sGXoczNSXruLLh4IgjVkeT3CZQLSOG+gc+Qv0pSanqgqNKUaUrrWUFJFdYEzrCiiqqCKYbqLCmQxwRTD0CoqJekv0x18SyGKJ5QpcqhPEerWBNh+Z9BXZEgklSMkKGYDkfRbm1z+Q9T+VaKqzjmyQTKZdwUpWnDvv+uFmiFTkmncrHRL4ghQKAd8UnWXs+pENRHXdvPvdPFtcnDyMCaXleCcASpxYr9wH/lbkPyIqW7DqYNHt5dZj5ePnQx8bTwEmJ7qD9pNz9t+J/MGlDqaqFo0pSchp5zLSVij1o
iaiZhIomcg7lOQ5djFOUe0BDtAdAbG49a4ZVdSrAFSLEH0IrsVKcwAt3agJKGECqH9Yp1ClIKpQVApSmUL1AIh6QAQ39O+n51wpHlLjkPp+R9PH4fn9a6dh+gPZ2j2fB9HSvuxrjSuKNKUaUo0pXOlKlzxcytGw0ddaDDPsvNrt7QJeCo8cGbRHK1grtdgXp56CnryZdV/QqFAJopyLhVs1XMqKqgrEOmQoFn9NkmOKSCP9yJuXMGAf1GCg3Uv6og9SQDf63rUPyXpY8vPw9nltpm14ibHI2rk4cMkki+3LHjWC5GTJcxKHdAoC8SrEkuW4xhxTyTh2csgWiCi8u58w9llrx5xByHeRr2vw0pW56HPbOQTbkLHNaoHvdKRsTJNa1CvWS6L+TTOikZQ5yl1P4WJq59e0t+GZkRye1HObgEEcpfdsv3EBuCkG7elak7H2HvWs7lBgiP9x13UZmGc/N1iMrOkiSCLXnXs0/9KNniOTMkimOIqxCgE1PK14utvnAcD6BjvCecntBrfGSEWxrhLEGS5KAVluY94w/WYkj/ACcsyXfM7LSbPEM5d1EpN1EXSezxMyhyFOba0TYs3btHHj4U/tx4w4RxuRfIaNRd7XupAJWxv6jz5rRWv32u/wAeflTL3PZdWuXl7uQZGbl4yycNRDlyvxxuXExzRsyLKSGQgowCkgV5C7LWp2m2Gbqdoi3MJZK3KPoSdh3pSFdxktGuFGj5i4BMxyd82cJGKbpMYNw7BENalkjkhkaKUESKSCD9CK/RLAzcXZYUWwwXWTDnRXRh6MrAFSPyIN60mvisurNvLy8qnkp5hllSd0mKGhYOipNdheM+2pocKXXFmbZN2vGRiALN3llsCqapSkbtvsKRjALhZEvaNk6/1bY9glvCOGEDZpWH2j8h+J/Ien1IrRnzD89dL+IcMpspP3faJIw0GDER70gJsGY+VjjFiSzeTb7FY16UaNy58oLgVw95J8YY6nMMpNcQ3xljfIFMsilckrXzNyKyUiHshdI5/Hu3bJap1+S9coOlGjWObsRSalOrsVTY8G26lotRkawIJRDJwZTYtkP4Ja4JHEH8bAAWFzXizafH/wDkN8rfIul7xLkPgy7DEbIx5o/cWLT45DhYWVgre9Ivj7Q7SNJykIW5Wh/jFnfyfbHki9J8reLma6JU7a/sNwir9WM32m5TlPkXRzFY47r9eqNZpCxaoslILiDlys4XTBEhDiYohqja3O6jLkP/AHTFmSJiWDLIzFT9EAVV8fmST4r1R3fqn+RODpcVuh7zXZWwx0jheCXCjiSZR+rIeSWWf+qCo+1VVTckWq7fHmYv+XY5TOaTxDrNBvkXJ5YlMZ0FlPu6xkGkWG3SdFTcM8dRluyM2mXbxw7UWceFK4VR63ThYnfnHfrC64+X8f7QpqIo5A0pRAbMpYr4QM9z/C9vP1NeYdx17/MDoq5PyHn5eK8OAmROUEkE0cSzENO0WO0agAW5cQ1lUHgPoXJnP+W1x8jiCQxJA8nV6RAyeW5y/wBtmyYshHMrOVMVWCVDoklPS1kUkUC4+RI7Bu6K4Kg7cPzqLNzCUhQyH+OccYhxY8nhGZSzHgCSvjipJN/t82N7Em5FRGJ/mbuG7Cm/y9J+6y0wEgiT904VJfuM8yokfE++eHJePJVjCq/kkqfMHLvg75HvDRpjHhXIY6zVl6btEnXwatbvB2axv7y2YuQl8jZnk6sZYyXuyciCLeLBNokp0g3SBEoKH12Ze20nStOMfTGObMLEfqBYtby0hX8PoPH4C1YXXfj75Q/ye+SG3XyWmZrevRwK9zC8cawEgpj4ay2/3ByZpbuR+tuRsK8MmRr1MZQv1xyPYmsKzn7zY5a1TjeuxTeDgyzE48VkJJSNh2n+ixzdw9XOp3SfqFMcdgANaRyMh8rIfJkADuxY2Fhc+TYfSv1K0epxtDqsbTYZkbExYViQyMXbigCryY3LEAAXPk/WkcUiihippJnVVOYCJpkKY51DmHYpCFKAmMcxh2AA7R310+b2qTkfgpY+gF6urpfl61fBPJ2i4x5HcjsE49x3a4fF94lZex3aZhcuVKnZIpkbPx0vEUaHYv0GVvdDYSs0UHSy/QAFWWTBHqDV0h6/Dg7KPG2OTBHjuEYksQ6q6gghR6N5tY/xItXmPY/MGw7V0nL3XTtNtMzcY8mTCqRwo+LNLjzNGyPM5XlEPbLFkC3/AEqxerVuU/lPcevLe4dkfP7hXc9ZSyDlBarxsHcq3ZIe15qq1sLA9GFsKx9YWsiNUyqY8Qi9RsPSDoGxXKCIlKsCRrRtOq6/rmpuWWfKkl4gMrAyK1v6cYXlxfxcNa9rgetq0L0X5+7f8zfIZijx59VosTA9xnhljePDli9z/wCZmNIIjLjWdkOPcLy4O1yvIOFg+h85MH8FcCQfLHOrfCuRS8sMPOuImJrtkSZp1wt0L30mWfxdmKzVmNm5ewQ8tCPkjQ0M+MTw5mgJKqpgciIZGFj7rC0cEe2n9jI/dRmCNnKsw83SQgEkEH7VPpaxI9KiO0bf4v7P8pbbK6Fqzs9OdBmDbZUOOk0ML2X28nEikZER0dSJpo78gxZVaxc08ZAdZBzjl4cT4BvnLLkPxwyLIX2Ex7Vsy59i3tnzlknGKT2zRirCovV4t1jdWlzztN+gzdIGUfoR2xVjkXEhKjkNPm5f7XAkysjXycgiySgmR0uR9vjhxJuAR5t6+a9EaiPUdY69/f8AtuJodP3LCWB8iXDwGWPDx8kiNg0w5jIEyKY2dWAQyfpHG5s88ufkjU+ZEHZMW8v8tZh5TZhwHZa1yX4tt7zZ4DC9Vncx41jloqt4ix3LhKMLNa5ycnJVJm7YPWvgOpv3iZTEEDKWbr2xi3CNjbeWXKzIGE0PIiNTIgsEU35MSTYgi3itI/MHS9h8c5UO8+PMDX6Pru2hk1uzMMT5kqYeQ3KTLyECGKJI0RnWRW9z7rEg+FuW4ZVg8tzrzLnm28a+RXHDPGcuNWPbVnSqzC0JL8bEbWhOpx6UbD2eLZovbDl5iwYpKPVFTdyi2WVKRMhhP1W/UR8t7NnTY+Rj502MrSqbGHle1gwFy4A8/gL+BXnD5HzRB8V63q2v3Wn3HVtZup4sKRQ6bH2jGWLPEzFY8QsxCBfLMFJZgBawGw5QzNF8l8e4mhsAyU/g2z0Cw2W5cjE7MybRdCuEYq8JC0ZxVTomfSK86mgmYHBVClT78vYYAMIT8mTmLso8VICcJkJaW/hWF7Lx+t/x/OtSYmj63P0vL32TtVi7RBlxxw6/2yWnhbjzmEt7Lwuftsb8Tci4vIBVq2XVQWWbN1lmhzHaqqopqKtjnKJDnbqHKJ0TGIOwiUQEQ1lSY2PNIkssaNLGSUJUFkJFiVJF1JHgkH08VVkyMiKN4Y3dYpAA4BIDAG4DAGzAHyAb+fNd4dno2Df07dgdvaPYH0ddw8eniuk+fXzTd5ceZQjsX319hKFq1ky80rMgvjiAvEg5iafL2whS+zGNkkmaiTplErGE3eKJmKYNg7Q9OsfLbKXFkb
CVGywp4BjZS30BI+lTHXo9HNvcSLs8s8PXmnUZEkChpki/maNSCC4+gII/KtxQnF1d0WmOskx0HD5Fc1aBcXyIrLtaQrcXcVoxsexx8A+cCLh7DM5YyqbZU4idREpREdx19wGYwIckKMgqOQXyA1vIH4i/pWNtl1ke0yU0ryyaZZ5BA8oCyNCGPttIo8K5SxYDwDekDmgzh5Bt0YO1xsJOVqWi7S7jVZwkS6lIxqqZMsadQFkhRSk11CpkMrsiY3YPp1q75VM+VqI4tRsoMTbYGTDlvEZxC80SG3tE8hYTMQoL/wBMtYE+lbE+LhBi7d5dtrp8rU5+PLiJKIDMkUrrcygcTyMSgsQn3geRTlRS8W1bsXSzGLrslZBReuI4pmSK7uUXQK4XRMsh3YSjtIphAVA6hMAb+jV7102uxoIciaHHwc/P4yNEDGGeZl5MpZbe663N2FyR5qi7CHYZE82PFNkZuDg3RZLSMqRK3FWCtcxIfB4mwBNvWk45pTiUrwQs/c5x71Tisp7VYuEIh2szUWOZGDUO2DoUYlROKQ7D1nD6YBqDyOqTbDS/2ndbXLlvmGX3kZYXaMtdcclPBjAJU/VhbzU5B2qHX7o7XTavEiAwxF7Tq0yK4UBsgBvIkuA34D+FbSERqVJTgaFHOyMlFWz1WDinTpw6fOmyCp3D1Uiy/WqqVE6wiPUYNg7A7A1n6mHrfUo8LpmDIIXaOQ48LuzyMqsWchmuTxLebn+FYG2l7H2t8vuOdGZYxIizzIipGjsAqAhbAcgv0H5mtCVEAy+q56Ll1DSkyCYTl9w9vGiHQmT6v3h+Efg6NQ6Qj/8AWc05/ul/7SBe/wD+jv1nwB6/uf8A+mpdpifjVIb6u391Jtb/APSH6PUn0/bfh/8AipQLSrR1cWtad1l+6FnGe32dkXj0FYZo4BYG4s2r1TqUQlhKPUHSAD0/Dqam2WNkdoj0OTr5pDFj/uUymjVoEblx4JIblZiPPjza/moaLXZOP1mTe42fDGJcj9u+KsjLO68eXNkHhofp5v5t4rWg+vcKMO1exbW3KStmeN38jEiSKb1yuKCB2Lty3cCY71ZsT1D9HaYdYAzO46o4uPl48ezfJ2DrJLDaFcXFPmN3VvLlR9rcfU1nnE6htBlZGLPJrFxsBGjimvM2TkjxIiMvhFY+Vv6Cl/q61TKbewPcir3avQ9aYtY6qtyJy1jsr4iTpKQQBUya1bZIAUzhm/MQoKFX+p2NsIhtqh7rL7xN23B1ehhjg66gE2VlSAOJVvZsVF/UklvuD+nm16vWmxekw9UzdnvZpJ+wuTDjYsZKGNrAjKdv0vGD9pj9fF/NKW1VwlpjUo08tMQwJSTGRB3CO/BvTixVFUGh1ek3U0ciOypf8oNT/YtEnYcBcBsnKxVWeOXnA/Bz7bX4FrG6N6MPqKgevbx+v5zZy42NlFoJIuE6c0HuLbmFuLOvqp+hrqtDuQZexXjaeg6/HpzLck0pOFL0P2CwGTLHMFzqJkbv11dugRHt9AfQHq7DlZuH+0ycfNw8LDXKUTmceJI2uPajYkBZGNuJPr9K7Ov42Flfu8afDy83MOKxgEBN45Ab+5IoBLRqL8h9KVI/+f07+n4NWKq/XGlKNKUi1nErNztlqEtV3TepHgk00bMV+QhJdWST8O/jUEE9nLRVqkqb7Jv8G4duqpNPsttuM/rGy10idbbDAXK9wATGUcZIlUfehQE/df8AMVaYoNdqtRgdl12wjfsYzCTi+2SYREeUcrMftdWIH2/nY1voGDjq1DRsBEJqoxkS1IzZJLLqOVSIEExilUcLCZVU25h7TCI6mNNqMHQarH0utVlwMaMJGGYuwUel2a5Y+fUmojcbbN320n3OyZWzsmQu5VQoLH1sosFH5Ctt6P8AHvqT9KjaBEAAREQAA7RER2AAD0iIj2AAaEgC58CgBJsPWuCmKYAMUxTFMG5TFMBimD6JTFEQENcBlYclIKn6jyK5KspKsCGHqD4Nb2GgncwqAJgKbcBOVRzsByJnKTqKQ5QMBw6/g7Nc1xTxRMeEWxSadZVBTAepQqYE6tx3DfbcTdIdm49o6UrZaUo0pRpSjSlGlKw5H/d77/Y3P8SfSlQv8tr+wbxS/EzUv5KbSlMF5Hf6p3hB+KI/9LrPpSrWdKUaUo0pRpSjSlQZiv1lV5/uP4v/AK98s6Uqc2lKNKV8nECkMYRAoFKJhMb0F2AR3ER9ABpSqwGd05bZNybKEGiwGDqFjjOD2Il3tyWSuT3kDh4sU48LZaEeMK1CkPFJUEw6HHem6B+q7BKaDjn3eVlECJcbFiyCCX+4zxW8Mlv0G/438VtPL1fxfodEkr5s+632x04kRccGBdZnFxeLJ58v3KhL+U4i/wBPQhzMVY/yTSLJmGXvGZ53KMLfryax4/rUrCsohriSrC3USCjRDpm4WVmmBVTgp36wJn3L9T6dZmFi5eNLO+TkNNHLJyRSABEv/gLeo+tzVa7Nvev7nX6nF02oh1uZg4XtZUqSM5zpr3/cuGAEbW8cVuPzotclndvmPF0bTKxRJHBb+LtKmX7NMzLtpeoCYboANRb1GHSMDSTZv3HY7MoBhTL6Ntu3md9kM+FMdIjrire6xNnU/wAoUfUH6/h5prMbpUnU9lkbfJzY+5xywjBhjjVsaWMn+uZ5D9yMo/QB6mnl1n1UaNKU2uUsQ4szfX2lOyzTIC/12MsEJbWkDOlO4bM7HX3BnUFNFRQXRWI7j3AidIw+qI77gIaxMzBw9jEIM6NZYlcMFb0DL6H/AE9RVh612rsvTs9tt1fMnwc+THkgaSM8S0UotJHcgjiw8MPX+FKqEp9SrsjOyldq9ar8rbH6claZSGhIyJf2SUTIZJOSsL5g2buZl8mmcSgs5MooACPb2jrtjx4InaSJEWRzdiAAWP4sR6n8zeozL2u0z8eHG2GTkT42KhSFJJHdYk9eESsSI1P/AIoAL/Sq5cU8t7ahnLI2LeSA2WFjc05JyjXeKdN+KmXgpeOxxiiBeLXmQuVmbk8G4bzaK5FYh6JzC56DFIIDtqrYW8nXYy4e25qmRLIuMntkERxA8yzD1v6qfrW/+z/Furfpmv7J8fft5snUa7Dl3U/72ORGy82RRjLBCfuBjIKzx2HC4JFV/wCccsceIThvS6JXcfXd55Z2Wy2Gpo3Jt7WX5KVHNaGTHknJSDZhkRJ9HwtNj1GThQ0i72U7hUybce0CjWNjnaqPQx48UUh6jPdeYv8AuFm9y5NpLgILH7j5/Ct5dN6v33M+WszdbDOw0/yJ1ftTnHPAamfXnEVEUtilWkyG5KBEnjkoMn405XKSm5DoHCPH9Tg+Vj3kjRrlmiMhq/NWeGph8Tz+J3EKyj6tiHK1qZzCbCGokQrDd6Mq2Hxqj8QIVJM2wly9zj5WN16KCPNOXjSZAClgnttEQAsUjX8ILX5Dzf6Cq98a7fQ7z5iztnmdYTr26xNQ8kscMmR+9izRIzzZ2FC0ZZ8lw/EQv/TEXksw9fI9LVoV52URpjedsteJIS6MNMpwblEko3h0T
upVdBNqZ83K3YpoqqdixzEakKooBB6gDRzw3lYY4Z4rkA29bev4+nr/AA8mv1NxNiEwY227Q4+fwQyRmQEoZDxQG/E3YkD9IBclVv4JSAdoAICAgPaAgO4CA+gQENdFSp8G1GuaV9ppqLKJIopnVWWVTQRSSIZRVVZY5U0kkkyAJ1FVVDAUpQARMYQAO3T62+tcMyqpdiAigkkmwAAuSSfQAeppxyYiv5GSEtJQSsDCKzNnri83OmNHxkXY6dGISthgJ1QU1V4OVYNnaBe6dJpCdZchA7RHbL/Y5QXm68Y+TLc+AGQXYH8CAR629RVfPatEZji484nzBDDKI4/ud4p3KRSxi4EiMVY8kJsqknx6vCfKEbH8O3+Dk6TSZ169zTX8pmyt4xke31cJamuopfHkdGOGiE4kUTwYLvnbdVZl65UTFBQxDDnfvFXRHXe3GzHIWT3PHJbrb2wLX+nk3K/T61VB1vIm+WE7m+ZmwQpp5cP9lxYQTe3OHGWzhjGTaTjGjBZPBYEgMKpXr1lyErkGEYyE7kf4izZFkkK1ZVopROdlp4hhM2gJ52XqkwphZQVyIrKIlBXpAhtih2XbKxNUNVJLHHiDsv7RTJGGBRU+roP0+9xsWAJte9eYNB2Lvs3fcPCzs3sP/wCpP/kEq4mY0DDJnyQSUxsqQXmGuExkWORowH4hWstrT+H0j+2Po9GtX17sPr59a41zXFGlK50pWul2q76LkWbRyszdOWbhJs5bn6FkVjpmBMxDiBujc+wCOwiADpXDC4IpteGPMXLXBXkBFZox4qm9dIlc1jI9MmSEVh8kUKQetlLRRrD4hBwsRlMAyIPeFDvE1UyG9YvUQ07pNxk6PNXNxbEWsyn0dCRdT+Rt/pWqvk/420nyl1WbrW8BV+XuQTLf3MfIUERzp5H3Jc+DcEEjx4I9IVo5L+VXkrPUrzFysONcz1mjVjF9V4d8KsUY/ncaZrpMwlPk96Iy81UIerUe2uIa0yKr+GAZBQe4DrTS3N3Y7Hl2XWMjOO3yvbmjRUXHx41KSKb2IZbKrWJJX7vT6fSvFWB0n521HVo/jnRfvNbm5U+TNt9xlzpk4cycLxPDIXlniDxKqS/01s36mP6hJbnHO46yXkvi1kfFOB8xcTr3x55fTFHomQT8ZomTxdab9fYCDsEPbrQSpS0MubGsjbjpou37du+cOnnfFOmPSIhJbqTHycnGyMWCbEmx8sqj+yOBdgCGbiR9ha1yASTfxVM+MMTcaXTbzTb/AG2u3+q2/XknnxxsnXJjggeSN4Y/dRx+4WK7IjMiovAhhexqw85Hyec/4Wc23mRAx+KZ7HsihWHWXYDEEfLwKtSuBq7GIX3IYVCUFZNKn2O7A6dGFiut4ErkplkkyCJwq/b+o5+Fy3EYiOObe4IwRxaw5PxP8pa58el/P41vr/HD/IvqXZEx/jnLfPi2qGQYj5ZST3YfcYwY/vJa8scPBBzUc+JCsxstQCx5w24wY+411vkRzSz/AGOtSGa4+zN+PmFuPMdUck36RVhFoxkrcslSjmeTrlMi412/3NCOFEJJwmYgnVanMUhoDH0+sx9auw3OQyvMG9qOIK7G1vuc3soF/wBJ+4/iPStr7j5I7zt+6T9P+NtRDNDrJIjn5me02NAocMRDjqI/cmdgv+8oMSm9lkHkSezT5ktQsuScUcdbrecmhw04/wCPLvgyek+JUlHY2meRdSnq6zRY3qwU9V3H4/j7U/lm4A/RFFZFNM6pCHMYTKHk8zscMmTFr53k/s8EbRkwEIZlI8My+EBJ9fB+oqj9a+Ftjh6XYdx1mLhf/rH22ZDmou1VslNfKkhJgjmAbIMSoT7ZDKSeJIAsorI5X5+xjmqeqMLg7AlX49YaxnDu4OlVSNerWS6z60idkpN3TJ96epIvbVbrAtHInMUpE2jFMoIoFH11VK1tc/GzZETCgXHw4lIVR5Y3tdnb+Zjb8gPQD6nePx71HedZxcjK7RtZ9v2PNkDzSsojhj48uEONAv2xRJyYepdz9zn0VYpIIrul0WrZBZy5crJN27ZumdZdwuscqaKCCKZTKKrKqHApSlATGMIAAb6i7FjxHljV/kljiRnkIVFFySbAD8b/AJV6NONkvxy8kyJicqcmccRHI7njk6oV+zUXj+iEKkx4tVh44YzEdKZLs8qznBqmVrEmQVEUGLFeQjUUk/3sFjKBsPWtruloMrZRjJ3kqBli8WhHg3ckGzn8ACVH4XvXjPukHcf8msmTQdJzJNN8VYORJHPn/eW2cgDIyY8aFPdxYz4LO6xysT68AtJPzJ/PsuvPbAROP1VwmfBlfkrdFWK5TrTJb+zydpi4Zq4MyrKjdtXK0m0jTyrkHDgFFXJHHcpgKZRKBg6ex96n32B+wih9mMuCx5liwA8D0Xxfz9fp4qR+Fv8AE/W/FPbf+W5+z/umWmO0cKHGWJYmcjlICZJbtxHEWClbnz5rz5D2iIj2iO4iI77iI9u5h7RMO+qDY168CBf0+lc65r7qWvC7L3HXC2bIO28oOPKPI/FqajMr+pBaJGtP4lwi/br+3GabU3s6yJpNQUItFvgTbvCmAO+REOoZXTZevw81Zdnj/uMX6ryII8+vjw3j+U+D+IrXfyb13uPZetS4HRtwdNvSDxl9pZFYcSOBv90dzYiVLslv0sDarZs75c8nqLynM5wi8mcqeVcXkOrIyktx2sEMSovIa0t3Qz2N4Uc6SD8khTaNi3wjeHLCRrB8YjMhSgoskQExtWfl9STJbNWXKykkS5hI4kN6oPdvdVTwvFQbD6kC1efOqde/yIydHH1ifC0WhyMOcqmwjcyh4iOGQ/7JV4zT5N2m96WSMFySVVjyq07iT56svyVetLfy24g4nrHGvHmYoaNdcmHMsaTrHHyVnIaRXx0VxCWWBsEvPX1ZzGih7UhzMlCEXKcyKBfq7Rqe8Psj7u0w4l10cw/rXuIiQeHhgSW8W5Lb1vYCtE/IP+LWP0mJsDoHYs+bueXrnYa0Lxl2CI6jIs8bxokFnv7U3uAkFQzn0WdJ58+WdmvzI7zlHP2f6BkqZgnkTirhnOu8dWuGx9j2qTKDx3PO7RYLKyLUZa0LWp0keMsR0UlmBwEQVIUSiTuh3vW83sL5OfkRyOpCY5KMFUG9+RP2luR+17XH4io7Z/FHzZ1n4ZxdH1PU5eFjSq+Vt0XIiefIlQqEEccbe6sQiB9zHBIceOJN+UTDcUOIXlrc3MHX7HE9lTmhbcm+8+X8DRcbZKVRcRY4oruPtcHYL5es1tDSNZvLtN8zdItP92sUUgKLgxjGSE0V/atT1zdQT4zS5ksvKSIAqsaIeQLNJ5Deb29APr5tV/8A+ffIXzT8ZbPU7mHB61gYXt4mczRTT5eRODE6QQYZCyQAqylr+47EkR2HKzpcJ53y6cC43wFkrkn5fOQcDWiU5BZNbQOfs6MlJiIoUjFIMsl1y7Sd5komjFf1941dEbwXhIhwYyzNQETLgUVDZOmk67g48GRstfJBK2Q9pZBcKQA4YsQtx5stlPp4v61BfJmJ
8w9s3O20vS+3Ym1wY9TjF8DCIR51ctjSQrCrz8ZAQWn5SqOLryCk8Rd1zusNftdR4f2eKccrbPXbHyXxVYK5K8QXRzEesZVss+jJnMSgJmBXBDtquVSUHYvUUxPRq67ySOSPEkQ5bRtkoQYPwI8GT/8AtEeWrzJ8VYeXgbDsWDkDQQZkGkyo5E2w/SykKyYf4ZwIIi/1qydYPsqnaQR7w+/d/UfVD9R/7P0Ppasf1rSy/pH8K69K5o0pRpSulwum1bruVuoEWyKrhUSFMc4JIpmUUEhCgJjmAhR2AAERHsDXXNKmPC+RLf2o0LGwubKLnwPJ8D0HrXZDC+RMmPFb3JGCi5sLsQBcnwBc+v0+tN6FZxzkdkFsd11lKpWCLbNFHkqzctHTmMYvPEtWzpFU6Siabd4gBygIAO4AO4hqjjQdF71ijsmTgxZMebjqjPMjI7RRvyRXBsVCutwDbyAaux33eOj5R65jZ0uPJhZDOEidXRZZE4uysOQYsjWNr+pHrW5ttYRn4UUmDeKCdjGboKnIyKB3LaGfrtBZpuiAkYFOgEB6R6dx2227QDUp2Xr8e51Xt4aY394x4m/ZyyrzSCRk4Bxbzbj48flUX1vfy6bZ88x8n+0Typ+8jjYK08avzKG4tfl58/nTd2CmLxOPaJWE6q7uy8HN19R23i5pzEg0coKKLu54HCveLuGLVycxvDmH1imAOzbVI3XVptb0nT6BNdJtpcTLxi6xTtCEZSWfI5G7MiMSfbPqCB9BV10/aI9j3Tcb99jHqosvEyAjSwLNzVgFTH4iyq7qAPcHpYn609qjVoq5SdnbNlHbcFCN3Z0EzOUU1f30iC5iiqiRXYOoCiAD8OttSY2PJkLkvGjZMYIVyo5KD6hWtdQfqAfPi9apTIyEgbGSR1x3sWQMQrFfQsoNmI+hI8fSu7cdtt+z07fBrtubW+ldX5/WuQ3EPh6Q7fh6Q+mP0Nc+bePSuPAPn1rjXFc0aUrTvLBCR8vEwD2SbtpmdI6Uh49QTeIkCMSd47MgAFEoggTtNuIdmovK3epwtnjaXKnRNrmBzBEb8pBGLuV8W+0eTc1J42l2uZrcncYsDvrMQoJpB+mMyGycvr9x8CtyACPo9PwB8I/talKjL29a0s7XYKzsiR1hi20sxSdtnybV2UxkivGZ+8bOAApyCCiJ+0vbtqK3Oi1HYcQYO7x48nDWVZAjgkB0N1YWI8qfIqV0+822gyzm6XIkxspo2jLoQCUcWZfIPhh4NYzSw+KsstWvY8w39ksGL4Jpw26IWQB6AADWPdb/AGZy1/8AiF29XbXRjbv9xvsnQ/tcpBiwxye+yWgk5/yRv9XT+cfSu/J0v7fRY29/dYr/ALmaSP2Fe88fD+eRP5Uf+Q/WlFqcqDrCkmQSUc+jjOHLQH7Rw0F2yVFF42BwkZLv2qob924S6uohtuwwb6xM/E/f4M2CXkiE0TJzjPF05AjkjfRhe6n6GsrByjg5sOaEjlMMqvwcckbiQeLD6qbWYfUUk6vUpetPfslwmZ2CRhI+Jj4iXIiuu3dszCLiWXkgEFnbt6A7H6igAarfXutbPQ5Vn2mVmadMSOGOGYBmV0/VM0v6neT+a4qydg7Jrd7iAprMXE27Zck0k0JKqyP+mFYv0okf0sSTS51b6qNGlKw5Fi2lGD2MeFMdpItHLF2QhzJHM2dJGRWKRQggdM4kOOwhsID2hrGzcODYYU2BlAnGniaNrEg8XBVrEeQbE+R6Vk4eXPgZkWdjEDJhlWRSQCOSEMtwfBFwPB8GkqkvTsYwVbgFX5IaJFy2rlfI/XcOlnL1yoYzZkDgwKqqrKmOOwm2D6eq3HN1j4+0+BpZZhi64yJi4wkZnLuxPGPkQSzEk2J/61YpIuzd92+duY4Tk7H22ycgxqqBUUDlJxFlCgW8D/pUkKO0MjHKuBMP+kqiHR6Nu6ExeodwAdx32+h2attVQefNLbSlGlKNKUaUo0pRpSsOR/3e+/2Nz/En0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/S6z6Uq1nSlGlKNKUaUo0pUGYr9ZVef7j+L/AOvfLOlKnNpSjSlcCAGASiG4CAgID6BAQ2EP8IaUqJ2WYq9nqd7icYysNWsiLwsqhRJy1MFJSAiLCsif2RIzUa3KdR9GoLiBlEiAImL8A+jXRlDJbGdcNlTKKHgWF1DfQkfUA1LaCXSwbzEm7HFNPoFnQ5EcLBJZIQfvWNz4VytwpPofqKaXAEHySgoGbQ5K37G1/saz6NUr7/GtWf1WPYxycQ2SlUJJs/HqdOXE0VRVI5QKBUhAP/ZLhauPbRxsNvLDLLccTGpUAW83v+Juf4VYu85vx9m50L/HmDsMHXhHEq5cyzMzlyUKFfQCOwYG/wB3/UupdErS7qljjqHMQcJfX8FJo0yRsLc0hEsbCZsckY/kYpE5XMjHNXYlMsmn2mL2azsgTNC6YzKuUVPAt5UNbwSPUgfWq1p21sWzx8jdwzTaNJkM6RHi7RchzVHPhXZb8SfQ1GDhvaM03OCyTNZbzphXOjFrej1ipy2F63I16PqzqstAY3StWQsg1bqPJpvOHKYBKKhU0+zqEBAAh9DNsMhJpc7Jx8lBJxUxKVC8RZla/qQ1bH+WtZ1DUZmvxOraXcaWd8ITTJsJUlaYTNyx5YeJIVDGCPNrnzatcTlTlGEyc9ol+4mZbhq9LZzbYjxtkGsKR1sgbLWF4xw/Nl2zJtzoL0+nILJFQN3vWp1n+kOvj+85keYcfKwZ1hOT7SOtnVltf3G/8V+n413t8Z9bzOuJutH2jVzZ8WlOdl4swaCWGYOF/Yw3uJ5yCWFrCw/Ol46p40Pk+yutOxJZLGrnOuqQmXsslu5y1qgs8eMzq0tmNKkHR0zOLCu5Ol3zFIphEu6gj26yTB+224yIIHc5KWlk5/anD9P2E/zXI8D+NQke1G7+OH1G22mPjrpcgSYOF+3/AK2S2U1shv3Ci9ogAeMht9FFSSf+N8A/9ndyEj4F57O8T1eG9oeGV8D4np9fw/iujr27ejfbUs3LieFudja/pf6X/wBa19B7Pvx/uL/t+a8revG45W+l7Xt+dVcSOUPMYZYSutJunHJOxZ3TiImvwWYsS2ipowCbrIEvLxLm11aHlwFRBziKFM2fO0VelN6oHSG24jqnPmdqXXSY+RictlxAWWJlAu5I5KD6GIWY/j6V6Sg618Azdxw9xp+wHH6UZXlkwM6GcykYyI4gmkTwVzpOcaMLmMebVIHIvH6xynHzHFfuNuvOarvg+NJb5SMhlIOpJcmLVAwr4oVK/Qzho8gjQltdrFIq3EASKqAGEfTqUytXM2riiyJJMjJx15EDivvsoP2uLcbMfFqoug71r8bvewztTiYen025k9hHf3JzqIZJF/r4sgZZPcgUEh73K3FbvHmDMQZB4w4+oOUeMVTxrSl2sZfZzjpLmSlIHH1sbv3E+uzcLxzhNs8dRcic6xlCmFMwnEBLtuXXZi67BytPFjZuHHDAbOYD5VGvy+ni4PmsTfdz7VovkfP3fWu
x5Wx24Z8aPapdJMqEqIgwDglVdAFAIv4Hm/mvPFyQ4P8ALrPUe8fYm5E4UyRwSb5Atl+rEjD2KpYurGMY1GXlmdlYOK80gINV27q8WZwCwI+JTX7oDbmU3HWrdv13ebNS2Dl48vWhKzqQyxrGLnkOIUfpF7+t/wCNe8/j35l+K+kZCQdp0G41/wA1tgw40yPFPmTZjlEaFhK0sgVZn4ceXErcjwtq8/t8i6/CXi5QtSlwsNUh7TPxdZsIBsWegI+UdNIebKHSTpLLMEk1wDpLsCm2wejWsMlIosiSOBuUCuwVv/JQSA3+o817o0mTn5umxMzaRGDaTY0TzRf/AMKVkUyR/X9DEr6n0pc4fwTe83OZwlTNARETWixRbBcLpLlrVJhHk8+TjYCKlbIugu0ZS9geKCmxbmDvHJiG6ewphDJwNbk7FmEHFUW12c8UBJsoLegLHwB9ahO2d20vTY4W2gnlysjn7UGOnvZEixKXkdIQQzRxL5kYeFuL+SBXqbrHlDcfeJuOU8qtso1aM5LtYeJcY4u3JGZgHWEKTk4kYk6lx91Uo1gwuDFqBHarIHaTlwl3RFSl3KOtyw9G1ekxP3qzINuFHB5yDCklrn7fAYetr3PgGvzT2X+VXe/lHsB6zLrcmT47aVxlY+pjlXYZGHzKp/WLs0DH7Fk4FFN2QnzUKOcHmeYKe1bH9a47HXtOT4EbYTMUzA1FtRMDZEttnqy9VtVrlKs4aIvMgOTTSZZOHVco+GAoJir1CAE1Xuxdw1rQxRar78xeQlKrwhdmXizFT+vz9yE+Px/Ctw/DX+OPdYdlnbHvwGL1yf2DgRyTnJ2WLBDMJoYEmDFcUCMmGdUbnckLa9686wiJhE5x61BEwmUECgYxjDuc3qgUC9Ru3YAAPpa1X+Zr3z4tYeF/CljD1JSbp1ys0eo5cL0ZxXnc5HosSmaMa3YHx4QlgeSQrEK2FKzOmLJNAEzqKne9YbFIcdZCQmSCSVLloytxb+Vja5P/AOVxFvrf8BUTl7NMPbYetnVVjzBKI2LWJmiX3PbVLebxCWQtcBfbI9WFI/WPUtXGuaUaUo0pXOlKiTlysukLWu7ioV34FyxReuXLRs4VQM7MdYzxdRQoKFIoJ9hNvsH0tdiNYeawZ0PM2B4n8qbaZnZSdkDzEw8fSFgcrqO5KdfvnjuVk3JhIKTl44cqnUM4RKnsCgD1H9JhEQ313tI0jc3JMn1N/J/1rAxcTHxIv20CqmIoAWNVCqo83AA+hv6en4V6d/Jg86PB3DjjRkvAnJhW3KrQVlsORsQSkbEStqbTjqXhmviKLJrA8fKwAqT8Ymo1XTakbIldLHOJj+nZnTu5YOo1smBsuX2sXjIBa9wPtPnx5Hg2t5NeG/8AJP8Axp7R8jd1wu19IGPwlhTHy1Z0jKBHNplHFQ9o3IZSxY8VAsKro8y/zgcueZNEUqoWvGVJxVSscXC12aqsqlK2aSnnjWzR7CKLGW6UkpAsXNKMGUeXZVuwZlOoocegpR6QrnZO3ZXY0SKWKOOGN2I43JN7CzEmxtb8BW5PhP8Ax26/8MZOTssHNys7ZZmPFHK0qxKgMTM3KJFXkgZm9Gkf0Hk2vVRqj12q1bslXblVkyO5VaNFHCp2rRR4KQvFGzcxxRbndCgTvBKACfoL1b7BqqEsQBckD0H/AN1ehfagRmlRVErW5EDybXtc+pAubX9Ln8aVrbGuQntlgaYxo1sf260s4OQrVXYV+UfT8+ysrRu+rzqHiGrVV/IpTLN0mo2FJM3elOAl3312jGnaRYVjczMAQoBJIPkWHqbj0rAbd6WLCl2U2VjpgQO6ySNIqxxtGSsgdyQqlCCGuRYg3raZJw1lzDb2Ljst4yvmM382wGUh2V7qk1VXclH94KJnTNvMsmaq6RFSCUwlARKPYO2vvIw8vDYLlxSRswuAylbj/UCsfR9l692OOSbQZ2JmxRNxdoJUlCta9iUZgDbzXxiDIamJcpUHJyMBEWlehWmItTevzyXfQ8q6hnRHjVs/T6TiKHfplN6B2EoDrjEyDi5SZQUMY2DWPobfjXPZNOOw6TL0ZlkgXLgeIyR+HUOpUlT+NjSIlpWUnpOQm5uQeS0xLPXMjJycg4Vdvn794so4dO3blYx1V3DhZQxzGMIiJhEddDM0jF5CS5NyT6k1KY2Nj4kCYuMix48ahVVRYKoFgABawA8AD0Fq14bD6P8Ao0rI+0+RaudK5o0pX0UCD1ic4k6SCYuxBN1HDbYg7CHT1B8Pwa4NfD3BBA//AGD8alAhxgdRlCuVxyFl3EGO7FBVyHsFOxPJWhSzZPyj7wGjDxTKqQ9HZ2OJj1FmckC5hlXrAxCoqEMQFAAoyg1ZXHebIlijkVQVQm7ve1goUMB63+4j61QZO9xy7bG1up1+wy8SWZ45spYxHj43t8gxkadonYArxHtJJclTexJq9nBFc8vXgfwq5EV7kZeXdw5vP6lcKUvjleoWCWiMTZAy3iVlMUiC905TrgZZ/WXMU2VcWkzUDREoqZuQ4GITe74MXX9FpsiPYvz3ZRl4cSQjSR3UcT9p42F3t9p8CvLPas35e+VfkzT5nTsZcb4wTJhmGQJo1fKgxMspNJ7qf1EWUMwXG5f1YgHIIJrzCD1G9dQRMZTcwmE3UYxtx6zD2iO4m37R9OtaGvcS+nED0FenHgl5lHHa5cbsMcfOWtVyNLpcZXkJirGuD8PLGcQfLioZjmV68/rmVoCbclZTb+lTEmWQTbIPGYLIiUyKZlUzdWy9H2PXTa+HA2qSEYxCJFH6TrIbEOD4JUm4Fxf6C4rw/wDKnwt3HV9y2XbegZGHF/elfJyM7LFn1MuGgkWTFdByQTohjLFH4kWduJFrffN04hc2eYfB1hi+p4dx8yn8Q8m4d5ibGmNrA5k3k9x6ZVSQpVRkXhpw7VlX7LXkJRNWRZd6UiLRuYEwMYAKa29s1O62+lGLFDGHhyRwRDe8XHip8+hF/I/AeK88f4+/IXxn8efJz7zP2OW2LsdG4ysnJjCrHsDKs0qj27mSOQoRG9iSzAtYeavVwJSprF+DMNY2nXTFzPUDFdDpU67hwcJxbqVrdYjYiRVjyuSkcgwUdNTCmCgAbo23DfV4wIHxcGHGe3OOJVNvS4UA2/KvLXbNljbztOy3WKrLi5efPMge3ILLKzqGtccrHzbxf0p1dZVQNGlKNKUaUo0pWrmodhYIaSgZMiho2WZqsXibdU7VUzdcABQqS6PSogYQDsMXtDUdttVhbvVz6fYBjgZMRjcKxU8W9bMPKn8x5qR1W0zNLs4NvgsBnY0okQsA45L6Eq3hh+INYES4r0Qqyo8e+SK+h4Rqo3iVXCi8gnDN+hm2dKnU3MqXcoFE4iJhHt1h62fSauWHqOFMoy8XEQrCWLSCBbIrknywvYXJv9TWXsoN1so5e2ZsLHEystw0wULGZ2u7IoHgH1IFrW8ClHqdqDo0pWld2OCYzcVW3km2bz04i6cxEWoKnin6DEpju1UAAgpiVApRE25gHs1E5O90+HtsbRZWRGm4y0doYj
fnIsYJcr4IsoBvcj8qlcfR7fL1WRvMbHkfUYjos0otwjaQgIG83uxIt4rGnK4E6+rr0ZiYixr0qEmDaNdeHayw933fg5YnSbxDINt+js7ddG40S7fMwcw5WVjnByPdCxPxSbxbhMLHnH+XisjUbw6rEzcQY2NP+9x/aLSpyeHzfnCbji/0vXdDzvtd3ONPZUrHexJH2f4iRb9w3lPsQK+LizgY/iGfb09XYO+uzWbj+55WZi/tsnH/AGk4j5SrxSW4B5xH+ZPNr+PNdOy1J12LiZJyMec5cHucYm5NF5I4Siw4v4vbz4re6maiKxVWLJZy2erM2qz1mChWbtVuko6aFWDpWBsuYoqoAqXsN0iHUHp1jyYmJLkR5csUbZUV+DlQXTl4PFiLrcetj5+tZEeXlRQSYsUsi4stuaBiFfj5HJQbNb6XBt9K1lmgwssFIwZpOThwkEipe0oZcGsm16FSK9bVcSmBMxujpHs+pEdR+/043+nn1H7jIxfeUD3YG4SpYg3RvoTax/ImpDQbc6HbwbYY+PleyxPtTrzie4Is6+LgXuPzFZC7hjBxjcj+VRZopIt45KRk3SCJlXIpA3QOosuYiarxY4dW3pOf0BrumyMPU4CLm5KxRKqxCWV1UlrcVuzEAux82/mNdEUGZts52w8dpZWZpDHEjGy35MAqgkIo8X/lFainQdgr8Y5Z2K2O7g8VknTtCReNEWSjZmuJRRjyJICJDJt9h2N6R3+hqM6tqN1pde+LvdlJtMpp2dZXRUKo1uMYC+LL9CfPn8Kkuz7bTbnPTK0mti1mMsCI0aOzhnW/KQlvN2+o9PFKzVlquUaUpO2qImZuJ8DA2NzVpDxrJx7WaNknaotmyxVHLIElxAnQ9TDoEfSUB31A9k1m022s/aafOk12Z7sbe8iK54qwLJZvFnHgn1Aqc67stZqtl+72+DHsMP2nX2XdkHJlIV7r5uh+4D0vSi+AA33EAABH0biAbCO3wbiGp0en51B/+n/0/KjXNKNKVhvI5hIggD9gzfeFcEdNQeNkXANnSf725b96Q/cuE/8AJOXYwfAOsXKwcLN4DNhimEbh05qrcXHoy8geLD6MPI+hrJxs3MwuZw5pYTIhR+DMvJD6q3Ei6n6g+DT91YVDQjQygJh1AYU+7ERAU+oQKJgH6k47dofR1lVjUodKUaUo0pRpSjSlGlKw5H/d77/Y3P8AEn0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP8A0us+lKtZ0pRpSjSlGlKNKVBmK/WVXn+4/i/+vfLOlKnNpSjSlGlKau8NFCPkXndpFQVTImJgOUFFVS79QmT+q6SlAA37dtKVArjRG8h2OQeVi2bpm0SlQfZrVWwC2sCcCmxi8Y+zBOm2rYw6yro8T45bp3egRfqT9HaO0HqF2q5WadizmA5H9Hlawjt6Lb6X/HzW0/kTI6FPousJ02HGi2qae2zMRkLPmc7Eze4AOfEX/p3Wx/hTsWnBeObjmDGGcp1tMHyLiKLs8RR3LWxyLCJQZWxEG82EjXUFyMJtUyX72osQwoj2h8Gs2fXYs+dDsZOX7qAME+4gWYWN19D+X4VWNb3Pf6nqey6ZhNCNDtZYXyA0KNIWgN4+EpBaMA+oUjl6GtND0zK9dzio+rshi6B4yuqQ8We0GGqRIu+OsxSEsdy+ty0yxQQj1Yd6xEoLFNuuouAmNuPbrrjx86LYloTCmpMZJQLZ/dJuWuPBBH+t6ysvbdY2HTBFnx7Kf5FXNULkyT88YYCoFWARsS4kVv0kWUL4FvStZywzY84/4KueR49jLrv2rdOJZy8dAK2aNpryWKsg2vVyi0F26pqLVVgBzKKFNuRuXsARHbXzu9g2s1smWoa48AheQQnxzcX/AEL6t+VZPxj0+LvXdMTr87xCFmLsjyiF51SxbHx3II/czD7IR6Fj5tSa4hck67yWwGyyJVLO3yVMVv2jTrbY4irytIr1tyHWopq7l3FPi50pXSdamV3qIsnBg6DEV32DpEA6tHtotvrBlwP7zpdGYKUVpFAvwB88TcWNSPyr8e5/x33h9BtMZtdi5HGeCJ5kyJYMWZ2WNZ3j+33owre4vqCPzFO5hK637ImNYG3ZPxVJ4Uu0krLElcbzE2ysMhBJsZR2yj11pWOSQaLhLMEE3RQKUBTKsBR7Q31ma/IycrEWfMhbHnYm8ZIJWxIFyPHkeaq3cdPo9D2CbVdb2ce408YQplpG0SyFkVnARiWHtsShufJW4rUVqYz45zrkeFtNToUfx8j6vWXOL7hFTyznIM7bHPR70sLRAGWMhGRTH1vCqlTIKuwdo7jt8QybM7GWOaOIasIvtsG+9m/mDL6AD6GsrY4nSE6Xr8zW5WdJ3uTJlXMgeMDFjgH+y0Mtru7eOYubflTzOjOSNHZ2aaSrwjVwZmksYU0FXhUTi1TWUABFNFRfpAxg7QKIjqQa/E8f1WNv4/SqjEIzKomJEJYciPULf7iB9Ta9h+NNVg+SzVL4xhZHkZVaTSspOHEyWw16iTS87UWsenIuEodRnLPRFRwd3FAmdcphECKGEv0tYWufYPhq+1SOPMueSoeSgX8WJ/EWv+dWfuWP1DF7HNj9ByczM60qx+1LkxiOcsUBkDIvgBXuFt5IANecznpxTreM+QM7XMu54w9izi1ydvEplWCcSUvO1S24UsdUqww8nI1Kkwpka/c4hU8oDZVkIlQVI5Fc5CrEKOtVdl00OJtGhzsmCDTZkhkFyVaJlWxKoPtcebEfW9/W1e/PhH5N2HYuiQ7Dquk22z+Set4aYUgRI54NhFPN7iJPkSXlx3ATmsgBZSnAEoTUlbb5JHG7K+VMT5DrMvB1XjivhNijZWlIkJCDs+R8gPYsVK7kJgrIA9hY2IlWjtF84RTFM6ipQJ09Bh1LTfHmpzc2DKhZU1Rx/uCEhpHt9sgv4AIIJHj+Fa81f+YvyF1jrO00OximyfkBdwxhbIVZIcTGV/6uKwXjIzoytGrG4AN73ApT5XwTWfK2ww2yvh16xyrg+gwCEFkLjxdCY/Sd5Zvliskg9h8y2u/SDF05fSOPFnhBjIZJksBCE6G4p7m13Zutg6brxm4BE2uiTi8D8P6rliRKzkeSl/tQA/lao7rHddl/kp25usdsR9Z3LOnMmLtMf90VwcaKJVfAhxVZVVMoKRNkNItybycvFeW7kNzJ5L8qvAN875ZsV7hoaZlJ2vVp4DJjXK+8lh6VfZcTGtmjchW7UAQQFXvToogJSGADG303td9t91YbKd5I1YsqmwVSfwAt/p+A8V+lHQ/iT46+Mg79J1ePhZc0SRyzLyaWVU9ObuzHy33NawZrEi4FRj/Z/wCQNQ9bGrnSlXU+WFnnjhcq9P8AAjlZjapnpWcJBtF0nLcNEsK/eo+zO5hpLxtVtV1ZIJTSsKrNs0loxwdYwM3QAkJBIcok2D1DZ6nIibrW6hT9vkmySgBXDE3Cs482uLqb+D4rx/8A5H9I+QdRnwfN/wAY7DKG40yM+RhSO0uM0QjZHmhx2JjEntsyzIFHuJdgQ
ykNEjn1xCx5w3yVF46pfImq5zl1kJNW3w0JDniZnG75B2AMIefFvLTsWuu6aqCAlK5I6SVROCyKe5RNB9m0WLoMxcXHykyZCDyAFih+gbyR/wCtxbyBW0/g75V33y112Xf7jQZOlxQyCB5JOceWpX7pIrpG4Ab6lChVl4u3m0DNVut20aUo0pRpSgfRt8A+kB7QH6O4egdKUgbvjyPu6LRIqrGHkgeNESzK6KvcNGay6SbtZ8kxQXeO0GrYTHKQhTH3AQKAiO2vqO3IBjZSfJ/D8/x/6eaxspXMLPEvOZVJVbgcjbwtyQBf0BJAH1Nqa2z0W+3TIlRx7D1OJm5uxL1mm1U2N6W6CRsgRDNCAbuW8FGImkpKYfNEivH32Hv3CwiofYwm2z1L50648KgyEhRxXybeAbDzcjyfqfrVUlGL1bUzbbZTtHiIsk0hnmBSPmxkZTI54qqE8V88VXwPFewXiV5GflRZSaZYp5n3IvK19wffGeO8rSFtnJXGKdfvBoFtIyFfrzWIhIuPsEM2WBUSvCgqYB9QxzdgjuPU9J6tlCWK+RLkQPwk5EpZrXIFlAI/Ovza+Qf8ovnrRS4Gw46fA1W0xDkYqxIuT7kPMqskheR2Rz4uvj8QB5AnNUf+X58r2nSKUo2w3b59wg/iJFulbcnWSeZIrw0ijJIp+CVFBsu0eqIAk7RVKom4biZMwdJh1ORdC6zC3JYXY3B+5yR4N/T8/qPrWrNj/lt847KEwPsceJCrqTFjRo1nUqTy8kML3VhYq1iPIFW8t6DQmllaXNnRaYyt8fBIVePtTOrQTaxsKy0MQ7SusZtFgSSZwTQyZRSaJqFQT6Q6ShsGraIIBJ7wRPeC8Q3EXA/AG17fle1een222kwm1smVktrnlMrRGVzG0h9ZGQtxMhubuRyP1NUw/wDMNUewW7y77PY6ziet5RmKFdYKcmJebrhbBNYzojuPmIu0XqsuCrIPop1HruWZF1iGUSSQOZVVMxU+olO+QIZJevtLHEsrxuCSRcopBDMPwsbXPpbyR4r0l/iFtMTX/MMGHm582Dj5eM6IiSe2mTOrI8cMgsQ4YByqmxLAKrfdY/ngMXTNulIldxqUio7jzNWKyrp43NFPTOmqwSaJWqqRHapW6KiPdLAdESrCbp6ikEPPqlRe4vcePyP4/wD76/YaWKWQxtFIUCvdhYHkLEcTcePJBuLHxa9r1IfhnWrhcOWfHCs0GNgpe3zWZ8eMYSPtEQ3n6w5crWaOKclkh3bR80kIHuOsXaaqR0hRA3V2bjqQ1EUs21xooAplaZAAwuP1D1BBBH4/lVO+TM3X674/3GdtXlj10WtyGdonKSACJv8AbcFSr3/QQQeVrealp5yzjjM48wPM5uKKdSaY4RUiY2ajaHADW6dGZEiWvsu8s4GPSIhHC1CZZmMczNFBoCwnKmTsERlu4nWnfzf2rgMfwCFHFQ4FmAHp6/h4v6Vr/wDxtTuyfEutHfTkNuTzZGnk9yZsdjygMjG7X4Gw5lnsASfIqrXVYrfNGlKNKVIfjvm6Kw/k/C9quFHhr/RsV5fgMtydRMxjGUpbnME4jnJYF/ZVGa74Ydf2WQpW6vfNEzKKHMip1GKOfgZqYeTDLOiyQRTCQrYAta3gta9vHp6flVL7j1afsOj2eDrsqTE2mfrpMRZeTMsQkDD3Fj5BeY5H7hZiAAGFhZfct+Y9x5b8nb7yZudeqSU5bnUgxa10K1HJQqNUSZOYWrs5lox8K3mZ+GhFUgUfnKCi7xsRce0AAO7bbibbbN9lMqe49xxsLcbWW9vUgW8/Ui9RHx/8b634/wCkYnSdZNkHGxlVjJ7jFzKSHlKFrlI3k5WQeFRio8VH/I8qtOPIKcknVYkJ2Zr8Y+ln1WMi3Q9RuSPYR8zCs2EfGwlijGMeRN0m2T7tX1VTCZQ5zmwcly7K7cS5UE8f+liALAj62/73q3aOFMWOXFgWdMSOZlRZbk+vJmR2ZmeNixKljceVFgABJ/injnlvTLbSc14yibpiCk+KJJjyStuMpWSwxRYpm8Bk5vkvZpmqT1Xbta68H1HiBVHqCw9LUe+MBRlNVj7WKZMzGV4Yb395kJjUA25ElSPH4jz+HmqN3/d/H2y1+V1reSY2w2XEr/boslFy5mK8hAsaSxyEyAeUYhGXy/2gmv0SeJF25Et/c3D+VIyXzTUIXj7Qr6hzxCViWlZz3eLgZGVewsRSUGqcnGkjoiTIoi8VPs5boFOZNMx9tegtTNsBwxMoGaEY6v8AurgCVm8kBfUWB8H6j1r8efkHWdOb9z2HQvHrNjJt54Do+LGTBhhuiu0xPFuTqQyAfaxIDMBTxYtneTslmTP8Xl+jY5ruDYWWriXG601WcXkbndIVdmsezucgRaki7Sh3bJ8VMrcCItushh9U23UOZjPsmzJ1y0jXCUj2WU/cw+vIXNrfTwKre8xelQ9d1U/X8nMl7PIkn9wilTjDC4I9sQNwXkCL8rs/08i9qkTqQqoVqZ9KbWgJ1GsumLGyrQ0olXH0o3UeRjKfUYrkhnck0SOmq6j20iZM6yRTAZRMpigICOviQSGNhEQJLGxPkA28Ej+NZGI2KuXE2arvhCRTIqmzNHyHMKfoxW4B+hsaROGo/LUVi6lx2drFUrZl5pDkSvtjocO5gKhLTYLrCdzAw7w53LFn4YUy9JxAROUxgAAEADpw1y1xUXOZHywPuKCyk/kDUn2Obr8+9yZuqw5GP11pLwRzuJJkSw8SOPDNe/n8LD86SOPZrkc+zLnCKybSsdwWCodaqBx+tlasDqRvFxQcxxlLia+wqpzN4Q8bJ7EaAmUneJjv63p11Y77E5k6ZKRrhLx9plN2bx93IegsfSpDb43TYut6ufSZOZL2mQS/v4pECwxENaL2Htd+S+WuTY/h6UoL/nfEGK7linHmQ79CVS7ZxsD2qYkrkmLzx96sUak0WfRUP4dquiDhsm/REwrHSJuqUANuO2vufOxMWaLHnkVZpmIQG92It4H/AFHrWHqur9g3muztvqcWSfW6yISZUi8eMKNy4s1yCb8WNlBNlJtYXp2tZdQFdBWjXxQvQatvHHRBsLzuEgdC3A/WCAuOnvhRA/b079O/btroGLjfuf3YjT92V48+I5lb3C8rX4382va/mu45OR+3/amST9oG5cOR4crW5cb8eVvF7XtWqr9ii7OxVkYhRdRqk+exxzOGq7NTxTBYUHJQScEIcyZVA9U+3SYPRqP0m81/YMNs7WM7Y6zSREsjIecZ4uLMAbA+h9D9KkNzpNhoMtcLZKi5DQpKOLq44SDkvlSRe3qPUfWt5qXqJrDVjo9d41kV2DJaRYkVTZP1WqCj1mmuAlWTaujkFdAiwCPUBDABvh1iyYOFNlR50sMTZ0IIjkKKXQN+oI5HJQ31AIB+tZUebmxYsmDFNKuFKQXjDMEcr+ksgPFiPoSDb6Vm7D6RAe36Xp1l2Pr9KxaAH0b9oB8A/Q1x9b0P5etJapw07CMXjewWdxanTiVevWz1yzSZGZMHBimaxRE0jqFOmzKAgBxE
BNv6A1Xut6vcanDlg3WwfY5D5MjpIyBCkbH7IgATcIPAb1P4VP8AYtnqNrlRT6bATX46Y8aMiuXDyKPvlJIFi58keg/GlSACICP0P2enVhqArjSlaWfrkFaWScbYYxtLMEnbZ+m1dAYUyPGZ+8auC9BiCCiJx3Dt2+jqJ3Wi0/YsRcDd48eThrKkgR72Dobo3gjyp9KldPvNv1/LbO0uRJjZjRNGXS1yjizL5B8MPWt19D6Qbf4uzUtUVRpSjSlJqWcWtOeraENHxbquLqPQtL145OlIR6ZEgFgaMQKYCuTKrbgp1AOxQ1AbGbske5wIdXDjyaNzJ+7kdyssYA/pmJb2YlvDXvYelTuuh66+mzpdnNkR7xAn7REQGOQk/wBT3W9VAXypFvNKX9nZ6NT/AK+R6VBUCIB2mMBQ+iYQAO3sDtEQDcR1wSB5JAFcgE+B5NH/AE65FcUlH0xMRtsg0HKdfb0x61VReSEhKkYyylgMqJWEXHt1DlTcpukwDcQ6j777B2arGftNrg9hxoJhhR9XliYNLLKEmOTf+nFEjEBww9bXb8PTzZcDWarO6/kzQnMk7NFKrLFHEXhGMAPclkcAlCp/Gy29fWpYMUiIM2ySaZUiERT6UymExSblAwlAw9pgAR9Pw6s/8fWq1WXpSjSlGlKNKUaUo0pWHI/7vff7G5/iT6UqF/ltf2DeKX4mal/JTaUpgvI7/VO8IPxRH/pdZ9KVazpSjSlGlKNKUaUqDMV+sqvP9x/F/wDXvlnSlTm0pRpSjSlJq1RiUhFqnHpKszAy6KhhAuwFD7IQTGEAADlD4fhANKVWva86Zqw5TOQGS8v4TXslUol3j2eGq3glZe75IyLj+RdM45OcmIB04InGTrR47BRduQxCkbpnNt6oCMFNsthgY+Vl5+PzhikAiWH75JEJA5FfQNc3IH0raus6X0/tm20XXuqbhcfZ5uGzZ8uyAx8TFykVnMccoBLxsq2ViCSxAv5sHgeYvpF/vGKc5y0bYWl0olakwqSKs1JxjeLZXuOaqS7GwVxs4LGyUk3TEE91yHFuqU3SPZrPOHj5ORDsZA4yIkPHyQAHAuGW9if+tqqkXZNxo9Ns+l4smO2ozchPfIjRy7YzMEaKUguiH1+0jmtr07u+s6qrWFJRsdNRz+HmGDKViZRovHycXJNkXsfIsHSZknTJ8zcEUbumjlIwlUTOUxDlEQENfLokilJAGRhYg+QQfoR9RXdj5GRiTplYjvFlROGR0JVkYG4ZWFirA+QQbg10QsHCVuLYwdch4qvwsa3Sax0RCx7WLjGDVAoJot2jFkki2bopEAAKUpQAADbXEcccSCOJVWMeAAAAP4AV95mbmbDJfN2E0s+ZIxLvIzO7E+SWZiSSfxJrZlOU4AYhinL8BiGKco9vbsYomKPbr69axypHhhY02UJiCkV7K11zPGt5ct6yDA12tWZy4npR1DKxNWMsaIJH19Zc8VFuS9+PeqoplOr2dQ+nfEjwceLMk2CA/uZVVWuTay+lhewP4m1WHN7VuM/rOH1HIaI6XAnlmhAiQSB5rcy0oAdx4FlYkD6UyWHJR/D5/wCUFEm+T7bMlvWd17IVawS8ikYh7x6pEwxVThYYzxqkYZWIsbt2gcHBziqUAD1Q31HYLtHs8zHkzBPPcOsJFjAhFgPzDX9fP8KuXbcaHL6P1vd4XW21OqCS4suyVy67TIjYGSTiT9kkSqwKgWP41UfZc9u+NZOYEJ5pdncET5SXGnSuNMBYfyPLW6crGNAdTMXMTdUXaO269OrCTho3OcouE3K6iBygkIj0jSJtm2pGfH3R/wD88kQxwxSFisdyCVsftX0+vm3pXqXX9Hi+Qj1TN/xqxlJ61iTpl7POxEgjmy7RvHHMGVhkTEFwPsKKGW7AeacblLzi4TZo4EZayHh+xQEra8VxMXi7FDzI+O493fIa2SKUMo0j6wwyFETHijydajlzrLmAxVk2qxj7nIGsrcdj69setT5WCymaFRHGXQFwxt4UODe6gkn8AagPjb4Z+YuofN+r0Pa4J4tZs5XzM1cXKZcaSBTIGaZ8V0twmZQqeql0A8E158+XHmV555eUPG+L7Uzq1LoeNWkIZjF0to6jns3YYeHUgxsEtIlcEFIjpicP+z26aTFA5QMmQvo1q/edt2e9xocOYJHjRAWCeCWAtyJ/Mfyj7R9BXuz4s/x36R8U7vYdk1j5OZu9g8nJ8hlZY4pHEntItvJDD/dYmRh4ZjVfi8jIu0u6dSMg7QExT905fOnKInIPqn7tZY6YnIPoHbcNVguzCzEkfmTW9Ux4Im5Rxor29Qqg2/iBes+tBXhsleC3mlyVIZyIC0nr5Wpp4lcGQbhOHhCvtmRpcsZ3otgW+xCt09XZvr6i9r3V9/l7HIcuPrxv5tfxe17X+tdGx/f/ANvyP7V7X909iT2fdv7fu8T7fucfu4c7c+Pnje3mpC8tFOJJsoJpcNG+UUcWM4CPZPnGU1UFJCXsrcDFezUIQTjKNIqRSEpjoPCpqJOSqAmUEejUruzozmf/AKAE37IKAfc9S31I+oB+oPob28VQ/i5flIdbLfLba1uytOzKMIEIkJ/THJ/IzobgNGSCnHked6jvX69YLZKNISqwUzZZl+48IxioCMey8g7ddB1AbNmjBFddZwKaZjAQpRMIAI7dmoqKKWdxHArPITYBQSSfwsPrV9zs/B1eM2Zs54cfERbs8rrGij0uWYgAX+t7U4WGKXGWvM+OKXcJReow0pkWqV+zyyjlrEyNfZvbCzjn7sh5YCNmjuNMcTG74AKl0iJg7NtZWvx1n2EWPO3CNpVVj6FQWAJ8/UVBdv2+Rq+obDcamMZWZHr5pYUALrKyxM6rZPLK/p9vrcWr1Oc8ODPltX6y4nuLK+wuNbxknLlNY2B9ig7/ACKvl2Puco6TWcTFfr0y6LXTW5+mcvvSUpWjU5RACmHs1ubsnXOp5U0GQsixZM06BjHeT3QxPqqk8eZ/9z0H4V+aPwn80f5CaPXbPUzYM2x02v1U7RLmhcUYLY6CwjlljHu+wtj+zJLuLeRVMnmv8IsQcF8n43oeKpm+Tpb5V568v17lIw7xCIYJWJxBRleiyRsWycreDFmc53ThRU6wGKGwCAiOv+7dewet5sONhNK3uoznmQbDlYKLAelvJPrXrr/GH5j7X80dc2G77NFgwnCyYsZRAkimRjEJHlfm7AcuQARAApB9RYCqTVMr05RpSudcUrjXNK2+OmsPf8u48xAlMptZq+XCErKqiINVDQTCSX3kp56Z86YR5GsLHJqOVCKLpiciYgHbtvma/DOfmR4oPEO4F/w/E+SPQefWqz2/skfVOu5m+aP3mxMd5BHcjmyi6p9qs13ayiymxIv4r3ieX7w74MYlxxVMucW0a7llK1NPHw2f5FQlnmZhywTdVicf06Vk2KLqpRz180cpLtGYIlAwGIbqANejOtaDQa3FXK1IE3MX94+SSPtJUn9IJBuB+dfjB83fLvy73beT6H5DaXXjGcg69B7UcauRKizKrETOFKEO9za
xFr0+fKvmxxc4Q08LvyIyTC0hCSctfZtcjWxZu/2lR258GL2EpkUJp6ZatDkMLl33fcIFKPWoA7FGV2m51eli/cbCRUBPoPLN9PCjyR+J9B+NUPoXxn3r5O2P9r6fhS5RQHlIx4QRWF7PM39NSR+lL8mPovrVMHID/mc+GtCj4sOP2P8AJef5t9uq+LMMFMTV2FSTWFM6Dt9ON5OZePlkhBRIGzJVDYBA6hDbANN2HyXp4FH7BJJ3PrccAP8AU3N/4C3416T6l/hD8kbWVz23Lw9Tir+ng37qRyR9FQoiqD4PJw31AIqweT5N8sc10Hyx838Y8ZGb435JXKoWjlNEKx8TbVMb4Vs0PDP1Tnnn5oxZh4JVw7L49qj3xzJh6gfUjYG2W0zMfW5utjtj5Lq0wsG4Rm31NrfXyB5rUEPSeh9a23des92zQ2402PNHrH5PF+4zI2kAsi8g17J/TYlQCfP1FmNkrkBcK9O1K1Q8fYqvZ4mQgbFAS7ZN7FTULKtlGcjFyLRUBTcs3jVUxDkH0gP0dWSSNJo2ilAaJgQQfQg+oNaVwszL12XFsMGR4c6CRZI5ENmR1IKspHkEEXBrx889f+W7vN05NRM5wZZ0Oh4EvbIry2xNyuDltH4hsibhQj5KDjVUZOzTNVkm5k1WjduDxZsoCiZzATo21HvfjqefZCTSBI8GQXYM3iM/Ww8kqfoBcj+Ffol8T/5na3V9IkxPlBsrK7ViNaJoYgWy47DiXYcYkkU3Ds3BWFiATero/LV8nHj55daoZBjJaaylyDl6yeu2DJU/3TSIiWciEa4mYmiVlAgJREc4fx4CDlydd+okPSY4AAAFy651DX9eP7hS0uwK2Ln0F7XCj6C49Tc/nXmv5q/yN7f8xKdROkeD1GOYSR4yXLuV5BHnkP6mCtbioVAfNjXj+89zilZ+NvmAZZs54TwmNuQEy5y7jyWYRiMfBCFiOC1mrTYrMoM0ZCuT/fEUS9VUyKiSxigCoDrUfedXJrt/LLxtjTn3EIFh59R//Kbj/ofrX6I/4qd+we5fE+BgCXlutTEMTIVmLP8A0xaOQ388ZI7EHyOQZQftqnGMjJKakWMPDR76WlpN0iyjYuMaLv5GQeuDgm3ZsmTVNVy6dLqGApEyFMcxh2ABHVQVWdgiAs5PgDyT/CvSORk4+JC2RlOkeOgJZmIVVA8kkmwAA9SfSuHUbIsSmO9j3zMpFO6OZ0zcNylVARAUzGWTIBVAEohsPb2aFWHqDXCZOPJ+h1Pi/gj0qQ+JOHvJ7OTCPmcWYJypc6/LSqEFHWSBpFgkK4vLOnRWREDTSDE7BJBBcR79YTikgBDdYlHUhi6jZ5yh8SCV4y1gwUlb+nr6fxP0qn9h+Ruj9XmfG3m1wMbLjjMjRvNGsgQDlfgW5XI/SLXa4tS/j/Ly5hyWUbvh1PCdpYXahVy/W6YRsLctWhHVZxq8MztU3D2ayjE19/GtjFE6S3iSJLJesU222u9ev7dsl8T2WE0asxv9osn6iCbAgfjfzUNN8xfHcOjxexHZQPrMuaCJDGfdcSZAvEjxxc3Vz6FeNwfB9Kh5KRjyGk5KIkCIkfRD93Gvit3TV83I8YuVGrgqD1ks4ZPEQXTECqoqKJKB6xTGKICMQymNijfqBsfr6fmPB/0rZGNkQ5MEeRESY5EDLcFSQRcXDAMDY+hAI9CAfFa8f2D/AOT6evmwrubwPFfoteSPTsPXvgNXJOLv8llqr5CqFYp+TsD26cNecW4fslPbuWcrR4Csz7ZwMahZEzoTL9q5M5TOu6ASD0FKAehOlwYk+iV1kMsUiBXiY8kQr6qAb2v4Yg38kf6fjf8A5NbHseq+V5oMjDj12dh5Es2NnQp7OVlxzEFZpJYyORiIaGNlCEKhv5vey97nSMp3IvHPFWNw/krwdlxdL3WMyfX6sgTCVMjaodSNa0aXnEVEk4ifcN2ZQZMyJdHdCQA2AQ2srZyQ7CPViGXi0RYOF/pqF8cSfofHgW/CtKRdWm2PTszvk+xwvcgzkhbGklP72ZpbMZ0TzzjBb73Jvfle9SQ/YGpGqbTP0bL6V4yZmPGhcf5ErKmHn9YYKXO0QHs2k5BGzRR5UrnHUx36nt5rDAn3L0wkJ3SxgDt31hwZYnyZsb25FMLAcmFla4v9h/mt6H86sW0662r0mu3Zy8OddikrCGJ+U2P7bcLZCWHtl/1J5NxTwazKrtR9suU8sw/I/G+JIbAc9Y8O26j2WyXHkQ3n2Tav47s0OdyWGpchXlEDPpF3YiokEixFCAn3xfVMAHEuBJlZSbGPESAtiMjFpb+FIvZbfUn/AOtW3C0WgyOnZvYMnaxQ9ix8mOOHAMZL5Eb8ecyyXAUJc+OJvxNyLqDILWfVSpucgY9hbkauzw1ijSt7o0l7Yx7ZrlWWE+5pUo4MilISdcdOG6z6EkXTNIExWanTUNsACOwBqF3+Js8rAJ0bY0e6RlMUk6c0TyOXgeQStwCPrarJ1rdR6vJkxtjJnDr2UhTJhxpjEZ1AJRX8hXVWN+LAj1t5pxCdfQTvBKJwITvBKGxRP0h1iUB3ECiYB2AfQGplbhQG8vbz+F/r/wCtVxuPM8fC3NvxA+l/zt6mvvYfT8H0foa58+v0r5uPQ18gAB2AAAG4jsAAAbj2iOwAAbjrgAD0FhX0ST63rnXNcUaUrRs4QWc5MTftSUce10WKPsty4A8XHeCIJO9jm4FAUVHW+6g7juOofF1Bxtxlbb9zkSDJSNfZZrwxe2LXiX+Uv6sfqal8ra/udTi6r9vjocZpD7qraWX3De0rfzBPRfHgVvNTFRFGlKQLKuWlxa152fsgDGRr157swsMB2jZSMetgSMlZSHT2kHbdUOpI5R2LqlYmj7FN2R9xuc++vgmf9rBBdEMLrYrlAj+q6N5Qg2FXLL3fXoevJqNPg2z54k/dTz2dhKjXDYpB/pow8OpHml9q61TaNKUaUo0pRpSjSlGlKYm6nMvISFRvq/tqPtj8H2NICuIvo6SO4rjcZFVhMzaRCIoqO3QEBMxzCQQEQHWne2O82fP1nub/ALvC2U/uarHxVkil5Yq+6Y58hQFUu/EKSSDex8Vt7qqLDhQ9k6en7XM1sPt7TIyWjkiC5LCJZIIGJYhF5FgByHg1tqxOZKkla+ta2lWo5Hko+KNZXdi9nJWFIyIZim1OLgwJSrZffvwKGwkAB2DfbUn17b99z5MKbsceu1CyZEg/aM/uTzQBB7YQ8jaZGv7gHgqAbC9Ru/1PRcCPNi67Jsds0ePGf3SpwghnLkOXHEXiZf8AbJ838XNqedtRoC8vYxKfj20inBSbaeZkckUMDV8yOUzZ2iYiieyxTjsHVuG3wavO20Wn3ogG4x48gY06zRcr/ZKn6XWxH3D6XuPyqkarebfSGc6nIkxzkwNDLxt98TfqRrg/afrbzUiADYNg7ADsAPpalv4+tRXp4rnSlGlKNKUaUo0pRpSsOR/3e+/2Nz/En0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/S6z6Uq0pzKRjNQEncixaqiUD
Am5dt0FBKO+xgIqoQ2w7fQ0pUQeRHJy94PyhxlrkRiWGvOLc95creHJ7JQZKbQknRrJbG9gcw6kdSyVmXUtrQEoAxlz+PYlTBQAATGAQFSmJunmFTtW51yPD9xQ8YVatximJEGuTspZWuNIe3x7k+HkZZaIxpCtsMWCkT8/AmYg2Kxd2mPdPnKgETIA7bqVIXlRydmsCy2EaFRqDE5AyjyDu89SqDHW68I4yoLJxVabMXmdeWq9KwNoVjzjDwx02LRtHO3b5ycCkIBSnMVSlHxU5P1HlLgmLzdDN21baFlbtV7hFqzsdNx9XtuNrLL1K7MErQw7uKmoVhLwa6jaRSBNJyzMmt0k6hKVSo14fzLiLNHmNZOl8P5Sx5lSKgeGGLYecksdXOu3RhDyx845ZckjJR3XJGRbsJAzcesEVTFU6O3bbSlWX6Uo0pRpSkrcVDEhVilUEnWchTgAFEVCb7iXcxyCAbgG4h1D9LSlQ5qdJydCZhyfeZ/MEhZ8b22MrDSg4kVrzGPZYyfxCJ05+SaWFBYz+aPZVDAc5FSEBHbYBHWBBjZkedNkyzl8WQLwj4gCMj1PIG55fgate03HXMzqmu0uDqY8fsGLJM2TnCVmbLWQgxo0RHGMRDwCpPL62pqs6ZAzhF514wY1xExBvXLpZbNOZns0vSX1grbfH1ajk1FIBGytDgjU7ZLvFf9DOsUSLbbbhtsOFssrYpssPEwhaGR2aVihZQij05fysT6Xqz9K0XTcnpXZOw9pflsMPHhjwIY8hYpTlTMR7phIvPDGo/qBfK/9pLDban7z+5PvRXQuYxYzoU8ZuM96fYXeiiEz7v+J9reye+KJPE913PUAh1b9mpj34Pe/b80/cWvwuOXH/yt62/O1q14NVtP7b/eP22R/aPc9v3/AG39n3LX9v3bcOdvPDlyt5tWZOTkJWYh7YLJMxNegYwhFJKcnZJnEQ8emooVFM76TkFm7JoRRU5SlFQ5QExgAO0dtcySRwxmWZgkY9SSAB/qbCunDws3Y5SYOvhlnzZDZI40Z5GIFzxRQWbx5sB6AmtNPW2jxqEOxsVvrEOnelhr9ZJJWSKi1bU+k23SSLrB3D1FWXlHDZyBkkmneLCBimKHaGuuWfHRQs0iKJftW7AcifovkXP4Wuay8HV7nIeWfX4uTK2EPdlKRO4hVG/XNZSEQEeS9l8EGtFiTFNNwlQobGVBJKo1eunkVI9Cdnn9jlkxlpJ1Ku/FSsquvIOCmeO1BTA5ukhNil7ADXXg4UGuxlw8bkIUvYMSx8knyT5Pm9Znaez7buO8m7FvDEdlOEDGONYkPBFReKIAo+1Rew8nyfJpydZdV+mTy3A22rVXI2RcA4xxxYuRMrXo5hCq2tNpWU7j7OesyMoe23FoglLqREaw707ciiwkIomUpenfUdnRTwwy5WshibalQBysvKxFgzDzYC9quHV83V7LZ6/Qd52Owx+hRTs0ghLTexyVuUkGOxKB3awYhbkEk3rzw+fhxoyJPQWFuWx4wiykHTIjGmX4WBaLSLGmSCyq840mRmCiJlaz7elHMeRVQgAU4pCIh3mtW/JmoypYsfeFfKxiOUKLhP5r3/8AHkSvn/6171/wg+RNDg524+LhIQs+W+XgySMFbIQARtH7f0m9pElKg+RyAH21XDwX4i+YbkqDmZvAsarjzElnjHUhMXTJ0ZEjj2cSj2TtMj+BgrPCzi85L9ysZBF5GshV7pYxQW7vvA1VOuaPtOXG0msHtYLi5eQD22sD5UMDyP0BUXsT5t4r0D81fKfwN17Mhw+7yDP7TjSBY8fDd/3UfNlPGWSGSMRx3AZklfjdRdA3E1VY7brMXDpo9SO1cM3C7Z2guUUVW7hBUyS6KyZ+kySiSpRKYo7CAhtqlkFSVbwQbGvTEUiTIssJDROoZSPIIIuCPxBHkH608ePcPZ25JWpRpi3GNyydYn67Nq4Cm1NQ0cgqVgKTAj15HM2ldh+/ZRpugyyiBVjJmHcxxERz8XA2W2mthwyTSMQPtXx6eL2AVbgfW1VLe9r6V8e6wS9l2WJrtegYj35/uI5XbirsZZLM4uFDFQR6KBafcJ5O/Jhu9w7G5WtGJMLWHM1wTrteoF3ujJHIh4Vsm0cWGwsoQgki5J5BtXWwxRXpHqywkIXbrA2rNF0TbhoEzXgx5ciTiqO4528XYD0JA/lve/8AGtH5n+WHx1JDtsjrGNtdxgajEMsuTj47HF9xiwiiaT9aLIw/3jGY1Fyb2Iq6mkeSDwJwZExsryKydL3iXN3LwHV4vcPiWkvnUP1LybZpEovEXLuEeFOmDlJd4uqimP74HVvrYON8d9a1qB9rM0r+v3uIkJHqAPqD9QTcD614/wBx/mT8391ypMXoOuiwsXytsfGkzshVksEZnIIWRfPBljUMf5fFR9rnKvjZh+VV4lcua/x5gMdXy1NbxF3rhxNRzWOxTXqpLs/ipp9/sOOo0Ze0zLgkeDl7MHfqqlZqbOiifrAIuHc6nAk/se7XFXFlkDh8UgCMKR7aOyC7Hxcvyvb1q9bD4y+Qu14q/KXxZPvp99hYpxnxt9GzNmyzxt+9nxosp/bhjHPhHjiIAyC8R42qgfkbaIiP5U5wt2L7hBXGuSOVbvP1O3RkeD+Am4axyD1+2WRYWBiYrtFJtJmQEV0O1VMTk2HpNrWW1mRd1kz4ciyRGdyrDypDEn0YefBt5Hr5r3J0DW5c/wAZ6bVdkxJsTPj1mPFPA78ZY5IkVSC0TfaSU5fa3obH6ivTB5GznBeU47LWYYWEawGfYaGpeLLDRUjonp1OoMO0cuqq9xjHOhcSELX7ZLN3Cz9BZZXuJBA/ciVPbfb3x02tzEnz41C7NVSNkv8AYqAEr7YNyqsQSRfww8eK/Oz/ADNj7p1nI1fU8yZp+jzTZGbFkkH38jJkYLMuYwssksCFViZVXlEy87teqm/NGzVnTlJJMZnImN65WpXixM3DGWX16YzWGuV2esl1dJ0Rojap1UkrZTz8AxBYzdoZds2XKooBSAYxho/cs/Y7hw+VCiSYTNHLxH2hmY8Byby3JRewuAbmvUP+NfUOl/GuO+JodhkZON2aKDLwROw92WKLHU5LGGMcIvalYqHfi7LxW5sBVOIiBQExh2AAERH6AB2iP+DVDr1oBc2FWwYJ8rfklkOpoTUfij23crVRaLlrHSNrXcw+PE6DKS7lZ2tLzgnSjJ62yzJmgVCAA/WLB8ZcxiKEKXV31vTdvlQCRYeWQ8aSxhvCcCfq3oWIAsl/Qk3vXl/uv+Sfx7oto2HPtPZ1GNm5ODlGACTK/cpGAoSPy8cCMzFsm1hLGEAIJNVaWJi9grBPQs7HmgJqInJSIloN2iowcQ8uykF2j2IVZuulw1VYukjIgkcO8J0gA9uqZKjxStHIvGRWII9LEHyLfSxr0ngZWLm4EGZhzLPiSwo6SAhhIjKCrhh4bkDe48G96ay6X2LrEQ5dNXTCQkiqg0QYJPUTqlXN1AY6xEhVOmVsACYwGANxDbcN9fKi5tXfJKqLf61C1Z46XdrPlHCov
F1lHCrgDmKqKyoiY5wOAgcBETfR9Gu9fHpWAyiQEPY39a9I+Nv+ZHzTiLibSsBUPjti+HyPSKw3psdlFu8VY1r2S2jlmhLCljKHiYxg1uqz5QHiq5XvgFXImOZqPWIa2PjfIuZiapMCDHiGTGvHn6C1vXgAByJ8k3tf6V4r3f8Ahh1vsHf8rte02+dLpsqczNjEBpORYH2/3LszGEKOAXhzC2Ak8VRLyG5N555X3pDJfIjJk9lO8tINlWmtgnyx6ThrBRyzpwyjG7eLZR7FFugs9VN6qQGMJvWEdg2o+w2WftZ/3OwlaWfja5t6D6eAB9TXqrp/Rup9C1Z03T8GHA1plaQpHysXYAMxLszXIUfW3imTZtHEg8aMGhCKOnzluzbEUWRbkO4dLEQRIdw4URboEMqoACdQ5SFDtMIAAjrCCliFHqTVonkSGJppLiNASbAk2Av6AEn+AF6/Sj4HZ0b1am8T+GeGoiK5G1vCeIonFfJ7P2PLVH+6GAMl1GqJuImo2CHVQcqWBxbHLddFBePdrNyGREQOcN9vRmjzfaixNPhgZEcMQSaVG+2JlXwpH15G4BBtX4q/KfV22Gy7B8j9jkfT5uy2LZOtwMiJvdzseWWzSo4I9v2lKsyyIGIb0Hi9tmrZXn+jSlGlKrx56eWfx/8AMTPh8M6yuQI5rh+dlJNiypE6lEo2SKmyNAlq7MeKbPSNEXZ2KZivGxCPUthKU/SIgNf3vW9f2Aw/vjIBCxICm3IH1B/C/wCI81t/4p+a+2/Dy7H/AItHiPJsYlVmmQuY2S/CRLFbkciODEofqLiu/AnlVeX9xqlatZcWcaaK3ulPZINIa9WtJ1dbam4bOiPUJw7+xLPWadlSdJlOnIIN0HSXSAJmKAba5wOr6HXMsmNjJ7yCwZvub+Nzfz+YANfHa/nj5a7nBPhbzdZR1uSxLwRWhiIIsU4xgMYyCQY2ZlP1BNTIu+KMUZMhFKvkfGOOL7XVXbeRUr1ypNXskSs9ZGOLV+eMl4x22UcNTrG6FBJ1EEw7D26l58XFyU9rIijkjvezKCLj8iPzrXOr3+/0mT++02dm4mYFKiSGaWNwp9V5IwNjbyL2NhcVD6fw5xJ4unyvFo5LleOlj8wG2scZx8kxucjGqfGS/q7msVmPwrEJoGiaRNRkav3jNJqmkiVVNPcQApChEyYmq1nuqJDjyZ7BLhrfeV4r7Y9FIHpbx6VsTE7F8gd5GBO+FHuMPqcBySrQqw/brIJZTmMTymRytnLEkgsbXLGvBfmnmnnvjnZOaXDXCOcMoz/G+8ZAv9BnkMvqRc9kSaMwnG0Ja7G6srUVH0VO2xxXVEXotXPh3TJc5FEupQ+tFZm5z9dJm6fCnlbXPI6n3LFzY2Y39QWtY2PkHzX6tdZ+Muqdzw+tfI/Z9Zgw9xxcPHnQ4nNMdAyF4oxGftZIhICgZeSOoII4iqvUHCrYFgSEgA4bnbKgdJFXdFQxDGAnekP3R9yBscvScA9AhuOqyPHp9a3u0avx5XspBHkj0/ha/wDA+Pyq4DynPLcxl5gLrOrq/wCXJOsv8O1lnM1fDdMc1GPyVmaSkYmzPkWVXlLc+OyjmEY9gEGz1b2c6KT2gkYyiGwCe3dV67jb8znIlKmFbrGvHnISGNlLGwsQATY+o9K85/P/AM0bz4mj1cep18c0OxnKS5kwlbGxFV4lLSrEvJmZZGZB7ik+2wAb6e1jjdDTOCMBcZMC8S8IYvotvjndNk+QHH/ImV66+y3ijGFhcPT2S5T7yrJFf3e7EcqIkbu3TciTohiJgYQIAF3NrlfBwMbA1UMSSgqZYndfcRDe7Er5ZvwJHmvzP7nk43ae17vtXyBs8/L1zLMuBn4+LIuLlZMYX24UEptDCRcsisSpu318r/FnLXIuYuX2QcY40qNGyPxGpcNKVmZz5SbSZ1MUTkJUlGpLXivIcA6WSK2eALgxGhWyBuwneGVMURKXvxdrkZm3fGxlSTVICplU3KyrbkjD/wBB4/1qI3vQNP1349xN5vMjKw/kHJkSVMGaKyT4EvL2snHcDyvgFizfXjxBAJkhyRzIjx4wJlzObiCLZkMVUqVuStePNsa2WZLGiiHgBnZIijGK74Vv31QpgAQ2ABEQDUjscwa/AlzSvIRIWtcC9vzPgVTel9bbt/a9f1ZJfYbPyVhEnBpOHK/3e2v3Na3oP41uYjMVMPiKk5hu9hrOOKrbalTbMd/brZBRtfhVbpEx8kwiVbO+ds4R0sdd+CCShFAK6OACmA7gGvtMyH9pHl5DLFEyK33MAByF7cjYfWwP1rHyOubIdgyuuayGfMz8fImi4xRO0j+yzKziNVZwLLyIP6R606BDkUIRRM5VE1CEUTUIYDkUTOUDkUIcoiU5DkEBAQ7BAdw1lfw9KgiCCQfBBr60rijSlch6Q+HtDs+jpQ+lN5BKzkNT5Z17dDKMu1cy7tkoxMyaKPRKoBm1fTVbquGqazQA7vqMPVv6Q+DVJ1Em31XV8nJGZ/yHZxvM8Zj9tC/n7cYFS6hk/Tcm/wCIBq6baPU7Ts+NjftP+P62RIUkEnNxGLffkkMFYh/1WHi3oa2TeGdTEpVrg9dzsG8Yw6qbyokfENFGcSaRDrJyqSZQI6eRxxEhDhsACG+s2DV5G02Gu7RmSZmJkw4pD4QkHslpVBYTAeHeI+FYePHisKbZ4+t1+w6ziR4mXjTZIKZhjPvBYyQphJ8okosWU+fNfNOgrNBIypLNcF7eo9kjuo9VaPRj/ZbIwCBY8gInU78pBH6odh7PRp1fT9g08WSm/wBo+zeWcvEWjWP2Y/pEApPIDx9x/wClOzbfQ7aTHfQ6xNYkUASQLI0nuyfWQ8gON/wFLLVpqsUkL9aS0unztkEhFF41kJ2SCxFjpOH6yibdkiqVuUy/cquFSgYxQ9Uvb6NVjufYV6p1jM3zBTLBFeNWDENIxCopC/dxLEAkeg8+gqy9O6+3auy4eiBKxTy2dlKgrGoLOylvt5BQSAfU+PWuijXJtb49wUQ7qchDM2NnaJIOSMmUyu0I5WbsHDkpReNSdQgU5dw109Q7VB2fCcMOG4xCkeUgVgiTMgcpGzAe4gv4YV39t6vN1rMQqeWpy+cmK5ZS7wq5QNIqk8HNvKm1LbVtqp0aUrnYdt9h2D0j8Glj6/Sl/pXGlKNh/wAfo0pSctNrhaZFkmJ5ZwgwO/ZRpTtmqzxTxcgoKTUoooAY4EMcO03oL8OoHsfZNV1XXDabhnTDM0cV1RnPOQ2UcVBNifU+gqc691za9o2B1enVHzBC8pDOqDhGLueTWFwPQep+lKPU9UHRpSjSlGlK0UatYFZWcJMR0a3iGzluWtvGzkXD141Mju6UepHIAM1U1vVKBR9YP8eobAk3Muzy02kGPHrkkX9q6PyeRCv3mQEfYQ3gAeoqYz49NHrcRtbPO+xeNv3KMvGONw32BCD/AFAV8m/oa0jGruH8klMXRCEl5aCmpF3TnzFsu3Vhox4QqSaSgnU6VXwpBsofYSj8HbqKw+vTZucu07WmJk7LDy5XwpI1ZTBE
4AANzYyW/UbEH6VK5e/hwsFtX1Z8vG1uZixJmxu6sJ5UJJIsPEd/Kj1/GndqjwjOYRMqciaSpFEjnP2AHUXcvrbgBdzAHaOrdVSp6wHftDtAe0BD4dKUaUo0pRpSjSlGlKNKVhyP+733+xuf4k+lKhf5bX9g3il+JmpfyU2lKYLyO/1TvCD8UR/6XWfSlTLytxB4u5zsqVxzFgPFmS7UjHIRCVguVQiZ2WTjGp1FG7Ajx63VWK1RUWMJSb7AJh+jpSo08j+FuSbbHcZqhxWt2DeP+NeN2VK9mGGpM1h+etUe9s9XPPBGRMenWcg0dpCVpynZHJ3KZElHB1xAxVChuAqVv+RvFPOXJp9FUW75nx9D8fCXjEuQ5+t1zFDsmVH0xiyxQdzShYm/yVxew8NCTVpgETmcFiDyTdmY6Ka+5hPpSltnbj9ljM+O7PS5yf48Xo73LLq11GPzVgEMh0quY9LGg3gq08rZbjFKyt0gJM6jlOcK4b96Q/dGb9Am3Upb8S+MtW4nYJreE66+LPpMH1psdmnVIiNgkbLb71YZO1W2WRrkSmSHgox3LyypGrBuUEGjMiaJdwJuKlMzUa7X655kmQG9egYWBbueEuL3DlGFimMUk5cBnXLJAXcJsUECrrATsAxgEwB2b7aUqf2lKNKUaUrQWSNcSkads17oVgUIoUqoAG/SPoKcexM2w+nSlQlyzNZ8hrbh1hh6h0q4VSYvpozOctabErDSdIx8VFH/ALfpzNJ0gWwTviRVJ4YxVA6SgPT276js2TZJNjjAjjeBpbTFmsUjt6qB6tf6fwq49YwukZWs28vbM3MxNpDhctckEQkTIyrn+lOxB9uO1jzFj5PnxUffMK5ot+EmEm12jIaPt+SrjaY2oY0ociMkDexyhzg8m3TpKIOWTOwg4RNRVTuN1O9OkUCiBh2jO0dgHXteMhFD5cjhY0N/uP8AMSB5so8+Pyq8/BHxDJ8xdxbTZMz4vXsTGefLyV4XiT9MaqZPs5SSWUcvFgxv4Faa04jqvJ1THfIbAWSIPDHJd5T8Yu7BlODiPeG7jhZ+b3jk8WyVVm3qJ4Wv2pwuoVJ04ZJOgKTqAw9oa+JsGDce1tdbMuPtjHGWkUcn9o/cYypPhX8+SAay9Z2rZ/HIz+h941023+PFy8xYsKWT2sf+4L/STMSaNT7ksIAJRJGS5sR6VFHzPMftuWcdlfG2EMg5UtPIDjdQq/IWDi9TzeHouRmuSJ6EdwchdWkl4SOsjqBZFF42TTXDuTEL1h27ahO4Yv8Ae1nxddLM+zxIlJx1/RJ7hBBe/hiB5Hmtn/447yT4vyNX2HuWBrMbo3Yc6VYtxP5ycU4kciyLjlOTxLI39NyV+4E2rT8JOL2Mac2vaecByJNVHiZZaZkCL+PeuKQtXoeeGlTazGSMz4utrKaWXloA5Y5Nuo1VAzJj4UOhMercfjrunxIBIux91oMJ1ce8tlSYLeSWNgTceALfpFhYVlfMXyR2PbyYTdM/YQ7TtGPkYr/22X3JsnWtOY8TAzIGjASUci4dbSSczybxU1aPkvjBEZtz5yNYvrXGRMnivFUrc89z8y/V482indSTWuo0F+LheJTmY1boSkhRRIt3puk4dXVqwY+Xp02GVtVLqjQxl5mJ9hl+gQ+lx/NYfX+Nag3PXvkfK6do+gTJiyZMezzUx9bFGo2kOR5aU5K2DmNxcxcmK2Fx4tUg+PtOyNTa1YDZFzW5z0ta7zN3emWNzAsINOAoFiBo7rFJZpRwilJR8K13FJ2b7IuVXcdSmrgy8eFjl5ByecrOjEBbI3lUFvUAfX61Q+9bbr+32MA0GnXSJi4UePkRCRpDLkxclmyGL+UaQ+qDwpWsnC+IFcJw12jXuT8h5MC03+z5ANJ5Ll0ZN5Vm88oVz7pwi5EkCMqpAkSEGyRtxTKI7jrnX4P9vjkVppZg8rPeQ3K8v5R+Cj6V19v7UvcMvDyIdbga79tgw43DEQosxiFvfkBJLTSE/ew9TUM/Mbx9x+zdxOyBmK0IWrK8NjTH92PVAwxcHj9vIPZdRiwcquGdalCRdlZwkoyQcOyqKCLdu3W9A7hqA7Xi6vY6SXPl5zxwxPx9pibk2B8KbNYgE39ADW3PgDfd56d8n4PU9a2Lq8vY52OJv38CqVVOTABpkLwtIjMsZAHJ2T1pvuIHGyTydTsYZMuVqiJzBk/wZqmDscwtWdXup2+OSsqBy5HezTeTXBv/AKUcyxGDg3U8bG2MXYoFHWNotS+XBDl5Dq2ubWrCgUurDl+stf8AHzxPqPpU78rfIWN1zbbLrupxZYe6Q9zm2OVJMuNNAxiP/wAVYygv4+0yqLI48G5vT50Dys+BGNSwTqA43UiZnq21USj7FeVZu5Scg6M3XRB/YkZmTXhptyoK26hlWggPYIFAwAOpHG6Z1nD4tFiRtKg8M92JP4tc2P8AqKpm8/yW+cOxe/FndhzYsHIYFosYR46ItweMRjQSRgW8cX/IkjxXbkK+5H4TcFMiZEsEfgdfImLYCUewsHUYZzjnEsogezkRqdaTjGIspBNynDP+ju0BAyzso9AbCIjzlZWV17rcuXKMY5UKkgIDHEfu+1bDyPBt+Z/jXxodH1/5i+acDQYD7tdDsp0WSSeQZeah9m88xduSkGRb3bwqHybgV57+R3nZ1rM7jGdjYcOsbymRsUKnmaPkHKE3NzXuXbpCLjTPZ6oVuEexrdEWVjZEWRRk3Egksk2R6ygcTbat2vyHFntDKmBEcuDyjyEnixAuyKpA8MPAYm4Ar3f8f/4d7DqCbHAn7bsIuv7MCPIxcOOOP9xAjvxinmkVyeUTFWaJYmVnfiSLVWryCyFzR5W1tfkznI+SL5jRnZnNSZXBaKWaYyrE+ugRVzXYKLYkTjIYBQIUihgSEDmKCaixlAAuqntMrsG7hO32Puy4gfjytaNW/wDEAeB/9gTXoXomi+IfjHYD466Z/b8LsL4yztAHDZk0YNhLI7EvJ5uQC3gEsqhTeoakKQhQBMpSk9IAQoFL29u4AXs7dQAt9K22xYn7r3/Ovr0B+18Af/o1zXFWzeWDOyeB87P860vHeU+SEpT8dsYNjS8Nx5BjE8lZcRfxMNV7vJSIlFrGMItm6OMiiioik8J0biADvdenSPrNidljxTZckcQASIePcluArk+gAB8jxevL3+R+Hj926UnStvn6zr+Nl57SNkZ7HmcTBKvJNjol+Ts7IPaZgzIb2/C+/nByN4kZkp8bxTydiK+ZQ5G2+NiJxzhHEcASw3/C2R5KtnVbOLFaEyNq3GzlfJJ+HUVXM6TBs4FUUhJ6Nmdi22jzoBpMyCWbauATDEOTxSFf5m/SGF7XN/BvavEHw38f/KXUttJ8m9b2uDrfj/FkeNdjnSe1jbDESXyIoSTK8UvDkFXgeahOQNedpPgpa4qfmMw5EwRmnHXGSk5poWOMjVCzOUG+TYZlaH3gZ13GTSMIEfN1uuuehNSRbNhFQHKZEtzgY5dWf8anSVs/LxsiLTx5CJIrH+oAx8kECxVfQsB9bCve7fNWsyc
GLqeh3enz/kXM0+Tl4k8IJw5GhXlGrxmTnHNKLkRO/gqS3iwPqkqfE3lFiKsU3B/G7lEMRx6Rx1kpm6tuToI14znUblZSdWKX2P3C7JKASpdAZggn4F2JDGTKcOkwmKJN2Ymm2Wvx01+tyz/bRG45SDlKpb/b4eLcU8eD+BvX5fb/AOS+kdv3GV3Du3X0PcWy8Y+ziP7GDNHF/wDnf7gBjIZ8huRMiXFyPIAN/FB5u2G20fnfKuboi32axy62bbFhnKbWfx9G0V09yPi6l0Rna8qxqVfmpKOkazkufkVHwOTN2BgfLnIYhhMBh012zCUZ0ucrlnM5je6BfvRV5OLEgq5JN7DyTX6T/wCPXZppeqYHV8iCKHGGsjzMUx5DzquPkzTtFisZEVllxo1CFQz/AGKCCALVTl27iIjuYe0R9IiI+kRH0iI6qQ9K9IgC1Gua5rjXHilOthvBuYOQ12ZY5whje25Qu8gmsu2rlQiHEo+8O3KBl3TkUwK3YtEtwAyq500wMYA6txABy8PBy9hMMfCjeWc/RRc1Xuy9r630/WNuez5uPg6xCAZJWCLc+gF/LN+S3P5eK9AHl8f8vzy/n864eyFyrxhVMfYEipQtpvlUvc7Ey9mtETFuDpqY/e0iCklZaMdWkoCQV3B0E2zYwqgfvATIa+9f6DtpM6HI2sSx4IPJlYglgP5eINxy9Lm1h5ryL8vf5c/H2L1bYajoedkZfanT2oJYEdI43ceJxM6hGEXrZeRZhxta5Hr3qWJpnjjlLEuNOK3GrBdF4tT8LPK52tsG4Sqt4r07XY0WeOU4uEanK4vSjwhhQcPHwunCSYm3UL/lbahxG12VFj6zGgj1jA+6w+1gQPssB+r8CTf+P4/nnsewY/ctHsN13zdbXK71FIgwYnBlhkSRr5HJyLQAfqVE4KSBYH6TM1MVrejSlGlKNKVgycnGwsa/mJmRYxERFNHEhKS0o7QYRsawaJmWdPpB86USas2bZEgnUVUMUhCgIiIBr5ZlRS7kBACST4AA9ST9K7YIJ8qZMbGR5MiRgqooLMzE2CqouSSfAAFyfSoF8krrhbCL2oeYBH0PL/IGzGgYPBdXace5R1e4+SpORp4z01nb1JhKkq0tGRrxl1ryxROqkiYClEdw2gtjNh4TJv1SaeWwjX2jyBVje/G/E2/8voK2v0zXdl7NFkfEsuVr9RhCV86U56iBlmx04+0ZWQyozBvERAF7k/WlBySmc9Rd+YT9e4j445J4ixpjR3lGpi7nIVPMqXI2OmyMq5W6FEWTvImHSNXnAuTS4EBZIxVEynAekpvvYvnpkCRMSPJxY4ua+R7nvA2AUHwPBvy9fXzWH0zH6pPqHxczsGbpewZ2aMWWyP8AtDr2TlJJO0f3v/UHH2r2P2mxFyPPbzT/AOXViL+6yXn/ABDnFhjnJmYLG8vtN4+ZRPFtY5e73NkFml8RscgLzJDObSrZXTwjVcElW47FT6ATDvi0Hc/Hq5DSZ+JOI8mZiyxPa3JvPth7+W5Xt9Pp+deu/jP/ADEyNQmF1LsGrfM0muhEE2fi8iwghb20y2gCeIvbCFxcN6m5P2ny68iOGfKPiYNfDkbhS54lJanEm1ra9nbMytZleHFEJAjFywePUDigVwQwdQl6yG6i9QbjrWWw0+z1IX+4wvEHvbkPW3r5817q6Z8l9F+QBMOnbLGz2xwpkERN0D343BAIvY/wPg2Pio3NHryOWB0weOmDgpTEBwzcrNVwIcNjkBZA6agFMHpDfYdRoLL5U2NXSeKKVOMyqyfUEXFX6eV/izLfKOGlMK8aMptMC8oHMTD3HIuXLFN5AqktdMAVq1VyUr9GpE7DSb2ZdWBrYiLOpMzdimxdxCaCIrdKSgGvnWMbL2aHC1svsbSwZ5GLKWiBBCqQSbg+T4sVsL+DXkn5z3fXujZMfZu6YLbTpAkaHHxY44JVhzpIpFkmmR1CCMxkJGGkMiSs7hbstvXwwpvOrAWE82yGMsd8VMp5qtGfD2jHVXrcYth2nyGKZh02QlpnKEyU8ApYMrMY9I67l+KygvFB7TKCHSO2lh3eBhTtjR4suY0/JAB7amM+pc/bye3qfr+dfnnNsvi3tnZtZFuszfYHWoNV7WRLK/7uVcpASqYyWk9vFLHisfEBR9F9RMyl3XEnI3Gh5mszFJy3jeyllK3MnYeFs1Oln0S5NGWeAdIvEDtZBKOlUFEFU1CGKJiB6ewdTMM2JscbnGUlx2up/mUkeCPzsa1tstZ2Dp27GNmx5Ov3UPGROV45kVxyjkBU3UshDAg/WsXL/H7CufMZK4YzDjivXrFSq0AuNDkiPGUARSrLpOa73CMK7jF2yUOqgTuE0zlTKBQLsJezXGXgYedjfs8uNXxfH2m9vt/T6Een0rs6923svVN2Ox9fzJsXegSD314s/wDV8SXLhgS/1JBN/Pr5pDcmpPlLXKDV2XDyh4tt12fWeLrc0OVbC9ga5RqKtGPGq1xZJMlknE+/rrxNsYrDc5nCIGDpMPZrH2TbSOBV1CRPMWAPM2Cra3IWNyR48fhUp0iDoubt55fkXLzsfWLA0iftYw8k8/IEQtcERiReQ9zwFa3kU4mDo/M0ViakR/IaxUq2ZpaxaieQLHjqLdwtJlpfxzoyLivxb9JB20ahHGRKYpyE3VKYQKACAaycJcxMRF2DI2YB95QWUm59AfyqH7PL1uff5M3UIcnH60zj2I8hg8ypxFxIykgnlyI8nxbyadbWVUDXIb7ht6dw2/b08/T1ofTz6U3UQwrJq7aKxjOTjoFdo9kmbtzEEF2aAsr3ZVyuu3dGMB3gG9YxRESjqjavC6+2k2PXugZEGFPFNKjtCOf7bKk+5mZXJu4Pkr6Vdtlmb5N1r+wd8x58yCWKJ0WY8BkYqeFVWUAhPoD6/wAa3rOYj4hev1KZsTZ7a3kV3iJVwBB9OjHIlJISSTZMopEAyhBOYoCAF37NTOJs8HWS4PWdrnRy9jlx7qG+2TI9tbSShQLAXBJF/F7CojJ1mbsos3smrwpIuuxZFmK/dHj+4bxxFibnwQAfN/rX1IXKpxUmzhZGxw7SXfuiMmkYq+S8aq7UIU6aBm5BMoic5DAJe8AoGAQ21zm9q61rthFqc7PxY9nNII0iMi+4XIuF4i5UkEW5WvcWrjD6v2TYYEu1wsHKk1kMZd5RGfbCAkFuRsCAQQeN7W80n3t9eRkncm7upyziPqiMMu0dxSjeRez5pQSAok1ikjFct/BGN6wqdhigIhqFyu55Ouz9pDk63JkwtasDI8JWV8ky25BIQeS+2T55eCLkVM4vT8XYYOrlx9ljR5mxadXSYNEmP7V7F5SOLc/oF8gkA0kWmKXziyQTybsb+wVaBdubXDNpV0sFia2iRXRW7h6dMgN3ECzbJlIm3P2lNvqsYnxxmTb7Dy9rnzZvXcKR8yBJnb9ymXKytxkIHF8ZFAVYm9DerJlfImJDoszG1eDFh7/MjTEnaFF/bPiRKRyjBPJch2JZpV9RanyKUpeoSFITqHqN0lKXqH6JukA6h2D0jrbwRF
JsALnzYfX/AE9a1KWYgXJIA8XNIaUnLqElIR0FUEVUWLiEFGYlpNNrHybF4oPtcWZEd1yO4tINwKfsOP8A01DY7ftYz58HT6xHjhkg4zTShIpo5D/WKBfuDwr6A+GP/rbdfqeqnBhzdvs2WSWOflDDEXkikQf0eZNlKTN9R5Uf+itkpBrEx76UenOmyjmq7x0oRI6xyt26ZlFTESTAyihgKXsKUBEdWfPzcfW4U2wyyRiwRs7kAsQqi5Nhcnx9ACareBhZGyzYtfigHKnkVEBIUFmNgCTYDz9SbCorZUlLdJXPH0rSMhEgGVkhO9hIp8VyxaqolMseYl35nSARyRkmCpBTI6N194QAKG49vnL5G2HZ8/tek2XUt4MLEz8TlBDIHjVgLmaaTmvtAiNgVWU8uS2UXr0P8eYHWsHq+513bNIczLwcvjPNHxkYMbCCGPg3uEGQEM0X28WJYkCpYtwMVBADreIOVBEp3GxQBwcqZQMv0l9QoLG9bYOzt7NekoAywIGbm/AXawHI28t48fcfPjx58V5ymKmVyqcFLtZf/EX8L58/aPHn8PNaeJgzRUhPvxl5WRCcfJvSs365VWkSVNEqPhItMpSig2OIdQlER7dRet1Da7NzMw5OTOMuYSBJGDJDZQvCEW+1T6kG/mpTY7Zdhh4eGuNjwHEiKF41Iea7X5ynzyYegPjxW7MQhw6TkIcu4D0nKU5dw9A7GAQ3D4B+DUuyq4s4BH5i/wD3qIVmU3QkH8jb/tX1r6rijSlGlKSrqUnm9vi4wGkKWsPot2orILyQJTZplE26TJjGCcBdNhQ9Y5ylES/S+Gu5Gw3MPZ8fXiPFHX5sdyZGl4z++p+2OOK/3oV8sQCR+VWHHwNRN1vIzzJlHfRZCARrFeAQMPMkktvsbl4UEi9KrViqvV0KOmqKyDdZy3ScOhOVqgosmRZyZMvUcrdMxgOsYhe0QKA7BrpkyMeKRIZZEWaQkIpYAtbyeIJu1h5Nr2HrXcmPPJG80aO0MYBZgpKqD4BYgWW58C/qayk1BRORUEwVFIwKgkb6lUUx6wTN9I4l2/w67jext5NvT8a6h5IB9L05ONLhM3avDKztUd05+m+dszRLx2i8VFJsqZNF2CqJSkKm7TADlL2iAD26geubPZ7fWDN22DJrswyuvsyMrMFViFe6+LOPuA9QDU32LWa3UbM4eqzY9hhiNG96NWVeTKCyWbzdD9pPoSPFOJqeqDo0pRpSjSlGlKNKVhyP+733+xuf4k+lKhf5bX9g3il+JmpfyU2lKYLyO/1TvCD8UR/6XWfSlWs6Uo0pRpSjSlGlKgzFfrKrz/cfxf8A175Z0pU5tKUaUo0pWkn5U0QxFyREyxjG7ouw7FTMYpuk59wEBKBvg+HSlQm5INMgSXH/ADc3xYzln2SnmNbehRm0FIjCy69tdRLkkMlFzAdkZIHenDuVdwEpwDtD06j9suU2ryFwgTlmFggBseVvFj9D+Bq3/H8mkh71p5uytEnXk2MByWkT3IxCJAZC8f8AOnH9S/UVAPh1OchJq50/j3zPwWNnmMTYaoeS8d5+uFWa2J46vsgzZkuUU/sLhN7ERFrrC7/2aiu1OR68KyOooI9W+qzoZNpJkJquwY3N4IEkjmZQbubcwSbgMpPEEeTYk1u/5Zw+iYmoy+9/EW6/bYu02+TiZWsgmaJVxlZvYdYxxd4ZgvusrgxxmRVX0tTpczMbceafDFyta7zkPjvZsiZWwzXbDlTB7eVC83l/CSThpSMf2EIojgfc6QVcGSdgCZEwTAvWI7AGs3f4mrgT97PJLizTTRK0kIPNyCQiNb+U3sfFretVr4k7D3za5Z6zrMLA32uwNZnyxYWxKftsdZEDZGVFzt/XUAMnkm97W81tOSVgPd8DZYzNw/zLS6NkTHU0rI3jJFZrMbZ3lqRw4lIOLDh60vEWppIqbtQCNjKbLqMygIpEHq197WX9zrZtho5448uJrvIqhuXteTExtfz6X8kfSsb49wRpu7avqPytqMzN0GwhCY+JNM8KwnPKiLPhUnhdfLgfaJP5j4qvDy8sKWiW5EZCuzPkK6YUfIlZhsv5Y4ijDXm40aUqua62+dRzJ5bchFbMknrafeKHORogooLZIiKggT1QqvVtfM+1lyVyiMeVBLLjWd0KyqSPue3m/wCA9ABW+vnnuGtxuhYGml0KPutfkSYOFvPcxsfJSfXzKrssGLdipiUAF2A5kut281ajcMaWHF1SwthXjfgHEljwY9v4xGYKXbXhWcJUMYyblSXl5usQb467awzKk0sY4NVSKlEwgIF9G10nxJcODH1+oxoG1xltKjGwWMm5Kg+GN/oa80ansWB2PabjuHyDvNpj9zTB54ORAvKSfMQBEjmkWxijEYA5i3jwT+LQxS+Ziipx9Rudl5D0bNthzfAT3KLFElUaqTh9BFh02tMoR4+BUkQc2OAFYUmS3et10FukVCAAAGsFGz7/ANsEj5WPkNMrZEZVf2wt9iWW92X0HkEfhVqyI+okDvT4mPodzp4NdLFp81Z5jvZPcJnyQ0gS0UtryLZlZbhSfWpgY+q8Fi7HtAwxO5BkrvIsaqaqM5vIk6zdX/IyEe1UJKSUkKq6budlFGq4i6OiUwkJsI7anMWCLDxYtfJKZGCcQZCPcew8k/Unz5tWqd7ss3su+zu3YeBHh48mSJmjxYmGLilmHBEsOMaAj7AxHn8awKXgyk4gw8+wzgOKjcRV5OLs6FTSh2Z5djVp2zA8XUnUo6XcOgkTIyzvxJkFjimqIdA+qOvnH12PhYJwNYogi4txsLhS1/usfXz5sfWu7b9z3Ha+2J27u8sm1zzLCZzI3ttNFDxHt80C8LxrwDKLr6+tInjTlZtc2FwxcrNT94tXH6RiMcXzKbyvREBWMi28sWR9JytVRgnruNKVqKoJum5ATM2V9US9usfUZoyFkwyzST4pEbyFQFdrXJXibfxH0NTPyH1iXUT4nZEhgwtZvY5MvGwllklmxYC/FEmMih/usSjG/NfINSb1MVrmvL95/nMCHfMqnw4pUqi+dtJeMyHlxzFyrB40ZqsUpBGt0WaYpoKuWc00dqJShg75MyaYplOQevs078m71HVNBjEMwYPKQQR9eKEfQ/zeviv0h/wa+KcqCbK+WtxEUiaJ8XBDoysQxQzZMbEgNGV5Qj7SCeRDeKhH5HmDcFZm5TTDjMBGFomqLVF52gYunqoWwVa0v1yOUJKwz7t4VeIQGoogmdozcJm8Wu4Axd+5EBr3x1rtbsNyxzrPLGl0jK3Vj5uzE+Pt+gPqT+Vbj/zL7p3XqPxpFH1QvjYebkiPJzIp/amhUWKxRBbOffN+cin7FWx/XevQ5iey8N6xO564pcWrpD23MOYpHMuQ5is2ptMZFxpC5PaM1IyxRFmM8jFa9CREZJlS7+EAB60eoCAbqDfaeFNoYZcnSaaRZM/IMrlWBkjEgFiGuOIAP8v4eleC+0a75Z2OFpPk75Kw5cXqmpjwMWOaEx4mXJhs3OJ4eLiWR3S/HI8Wa1yLVW1yJ8iW0ZDxv
WI5Hje1za9Ot7iSv+sMvr1v8ANBrOqq0x1LofJsM0ZfNfpDFJ8BGZ1b4jm9dRmCZEQfA3N74jfl3KIRizc7nbwXhxESl+q7dRuP8A3T+4TnKMJ1tl9njf3L/zc7+LX9LVctuehnqGq/si7Ad4Dzf3EylDilb/AND9sAeYIX/c5+CfSkpyoheWdcp9GdcVapSrtcXmV6XG3uPtjti2ZxuJ3SzsLpOR5pSWg0TyzBMiXclTUVV9YelI4htrq3Em4jgjbTIkk5mUOG9BH/ORcjyPp/2qS+NsL45ztrmR/JeVlYmqXWTvjNAGLNmgL+3jbikh9tjy5EhR4F2FSePRJLrP3bhp0dRujqOrv0bj07gCQgA7al/H0rXQvYX9bea+fcOV/wBYZfXrf5rSuaPcOV/1hl9et/mtKUe4cr/rDL69b/NaUo9w5X/WGX16v+a0pR7hyno8Qy+vV/zOlKPcOV/1ll9er/mdKVG6S4tXu85UyC/zFbaDlLj1MRdAdY2wVY8fRjsmP7/UXZ30jdlrG7TXWnHUg7AijYh0y+DOUBIPZ2xrYUk+VI2ayS68heEZQHi6+rcvU3/9KukPZsHWaHDi67Bk4Pbo5Jxk50eQ6+/BKLLCIxYIFFw1j949RUkfcSVH/wDaGX16v+a1JVS6PcOV/wBYZfXrf5rSlfadHlUlE1QXYiKZyHABOrsIlMAgA/Yg7B20Bsb18sOSkfjUPuHvApHh/GZxjIDI8tei5xzzds7ya0/HRsSavSV2KxK6rMcWHRKV7FMfBAJFlw744mHfYNRGo1KalZ1SRpPfnaUkgCxa3gW+gt/GtjfIvyFk/Ik+rny8SHFOr1UOCojZ29xYeVpG5+jNy8geBapfe4cr/rDL69b/ADWpetd0e4cr/rDL69X/ADWlK59xJX/WGX+E6v8AmdKVFt5xryRSc7Zb5PN8v5MvsBJ4bQrcDxXbOkfcNjYaqgjJhP01FwuVNO625SOFr1qkSJ1uzdZxL0gWNbFmhzpdkJZZIzDYQ+OII+q//ia3/U+au8e/1uy6vgdKfAwMXLTYmR9mQfeaOUleEtluYog3K3I+EHFQbkuthle75XxXRsi23Hthw7ZLbBN5aXxhfVm6lwpTpU6qYws+eN79kL1IiQHHuzbAU4bgBtwDJxJ3ysZMiWN4pHW5Rv1L+Rt4qB7FqcTR7zK1GBmY+ww4JSqZMFxFMBY805eeJvbz+HjxTm+4cr/rDL69b/NayahqPcOV/wBYZfXrf5rSlHuHK/6wy+vW/wA1pSj3Dlf9YZfXrf5rSlHuHK/6wy+vW/zWlKTVuxpdpSBesarZY2tTqwoCzmVmppBNqVNch1ymaKJCRXvkQEvo9XffUD2bD3mx0suJ1zMXA3DFeE5QSBLMC32HweS3H5XvU71vN0mv3EWX2HEbO1KhucIcxl7ghfvHkcT5/OlAhQpkiKJFnjJVYiKRFlQFUoKrFTKVVQCAkPQChwE23wb6mohIsSJK3KUIAx9LtbybfS582qFlZGlZohxiLEqPWwv4F/rYeK7fcOV/1hl9et/mtdlfFHuHK/6wy+vW/wA1pStfL0yytouQcxScfJyiDNdSPj1nZ2qb12RMTN2p3J0wIgRZQAL1iOxd9YmxmzYcGabXxLPnrGxjjZuIdwPtUt/KCfBP0rL18WHLnQw7CRocFpFEkiryKIT9zBf5iB5t9aIml2R1Fx7mWSj42UXZoKyEem6O6TZPDpgZdsRyRISLkROIgBg7B21xr5cyfAhm2MSw57RqZI1bmqOR9yhv5gD4v9a52EWHDnTQ6+VpsFZGEcjLwLoD9rFf5SR5t9K1FsxJYLJGt2LK1OayqhJsJA7+GUMV0uizUMdWOVFRIA8I9A2ym3bsGovsmlyd9gph4mdk6+RMiOQyQW5sqEkxG/8AI/o38Kkuu7nG0Wc+ZlYWPnxtjyRiOa/BWcWEgt/Onqv5mu22UC6r16VRqy9d9urt+6YlmheDFj3pyEckdg3T7/oM1McA6e3q213djTdTaXJi69+3G3dLR+/yMXkgMH4/dYoWtb62rq66+mh3ONJ2D9wdQj3k9jiJfAJUpy+2/PiTf6XoqeIDVKvx0DEIxcczZpCcWjZd6q2TduTC4figdyCq4pKPFDmL1CIgAhr563odf1nSwabWRJBixLfghYqHb7pOJcs1i5Yi59DX12Le5/ZNzPuNlK8+TK1ubBQxRftjDBAFuECg2HqKUXuHK/6wy+vW/wA1qcqEo9w5X/WGX163+a0pWoe4lLIykRNPUo5eUgfFDEuzLOgOyF6mCLkSFIUqZxVTDb1gHb4NRmXptZnbDG2uVEH2GHz9lyTdPcHF7AGxuPHkH8qksXcbLC1+TqsWVk1+Zw95ABZ/bPJLki4sfPgi/wBa2/uJK/6wy+vW/wA1qTqNo9w5X/WGX163+a0pXA0OUEBAXDIQEBAQE6ogICGwgIdz2gIaWB8H0oDY3HqKxGGM1YpqRlGJREezTMocjVmQzduU6xxUVOVJNACgZRQwiI/CI6xcLBwtdjjE18UcOKCSERQqgk3JAHjyfJ/E1lZmdmbHIOXnyyTZTAAu7FmIAsASfwHgfgKzPcOV/wBYZfXrf5rWVWLXI0SVH0uWX163+a1yTc3PrT+FJqx0TIPTEe6jqtFMEy0Gb9sC9MUYHY/jQj+4S3CREenoE3q+nfUDvf8AkfHG/wCOHFDfuk/ce+GN8f8An9vj6SenG/ipzRnr18n/AJCMor+2f2PZKj/5Hjh7nL/2/XlbzSkGiSm47OGe2/ZuZQOz4PQl9DU6fWoIenn1rXlxcJJNaaI3hiy67VNitJlTMD5VkkYDptVHAId4ZAhw3AojsA6wl1uvTYNtlgiGzeIRtLxHuGMG4Qt6lQfIHoDWc2x2D4C6pppTrEkMixcj7YcixcL6BiPU+tbH3Fle0PEsth7dutbbcPQP716Q1m+bW+lYNgf4121XG6UFb5K6OXr5WUfQjeC8KnIuDQxGiDgrjvixZyFRI+MdMAFYBEwlEQ+HUEnX8KPsUnZlef8AfyYywFTIfZ4K3IER+ge48t6keKnH3+ZJ16PrTJB+xjyWnDe2Pe5svEgyepSx8L6A+adPU7UJRpSsOR/3e+/2Nz/En0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP/S6z6Uq1nSlGlKNKUaUo0pUGYr9ZVef7j+L/wCvfLOlKnNpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKw5H/d77/Y3P8AEn0pUL/La/sG8UvxM1L+Sm0pTBeR3+qd4QfiiP8A0us+lKtZ0pRpSjSlGlKNKVX3leq8m6Vy8dZ4wzhmkZiqdj441jEkkzn8ypYrl4KxVrJlxuAuSIuaJcEZaN
fRlnIUDFOidNVIQEBAQHSlb745eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlK5DMnPH4eEeM/2/8A8wI38iGlKbDDXL7mPnXGNSyzSeEFFTrFzZvXsSlMcuIdrJppMZZ/DrA8QbYZeN0zi6jlBKBFTh0CUREB3KClOd8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKUfHLzy+RFjP54Eb+RDSlHxy88vkRYz+eBG/kQ0pR8cvPL5EWM/ngRv5ENKU19U5g8x7jkvLOKYjhBRgs+GfcILco65bw6UYoORa8vZoIIlwTDCi7nuY9uYrjvEkehXYCdYetpSnQ+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKbC18wOY9OyTibFcxwgoo2bMx72nUTteW8OrGpnx5W07TOBLOD4ZTXbd7HKAVv3aa3Wr2H6C+tpSnP8Ajl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSmxzNy+5kYKxhcMtXbhBRVKvSWDeSl0oblxDu5RRBzJMotMjJu5wyzbqq+IfkEQOqmHSA9ojsAqU53xyc8fkR4zH/8A7AjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUrkMyc8REN+EeMw+mPMCO2D6Y7YQEdg0pTWYY5h8xs746iMn0jg/R0q3NStwh2ic1y3hmkiV3SLnYaJMisg2w08QKkebrTgyIlUN1oCQwgUwiUFKdL45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlKPjl55fIixn88CN/IhpSj45eeXyIsZ/PAjfyIaUo+OXnl8iLGfzwI38iGlK6HOYOeS7ZwgHCPGYCs3WSKI8wI3YDKJmIUR//AKIb7AYe36WlKdPhli244S4qYDxLkFKJb3igYzrVbtTeCkVJeGbzjFmBZBCNlFWjBSRZormEpFhQSFQA6ugu+2lKiB5Hf6p3hB+KI/8AS6z6Uq1nSlGlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKipM8x8Nw3Kio8PhWs0nli2VGz3BR5EQZXlHqiFWims+tA2+zi9SJE2mXgXPjWTAiK6qjQgqqd0UyYnUpq+OPOXj3lK2UXEGM6JkWgwt8reQbRg6bnaLFVrHOVKxjefRirzJ49Wh5ySeNW0ZJyhFu7lGMUq4SW71IhwEdKU4ufeYtQ43yqxsiYuzifHkUlXF7fmyvUZjK4mo7ezzCMGwXscypY2c+qgzfOUvHHjox+VimoU6wlLuIKVLgpinKU5RAxTlAxTB6BKYNwEPpCA6Ur60pRpSjSlGlKNKUaUo0pRpSjSlGlKipy05j4c4YUiAvGXj2h42s9qhapB1+jwZLJany0tLxcS9myxJ30cRGsVb2ug4lnyipU2bc5R9ZQ6aZ1KavJfOPj1g3MmQ6pI0PIjuUr0thqCzxlqm0WJfUuhTOUxaw+IY/I88ecj7I+Wft5VAiQsI+TIxQcE70yQCIApT/5/wCRtO48xtLPPV+8Xq15KtpKPjjG+M4NpYb5d7IERJWB81hY6SlYKJRaQ8BDunr108etWrZuiInUARKUylKTBmbKPyGxlX8rY9Vlfd6eWmo9SPn408NZK9P1ickazaKvZ4VVRVWIsVbsUQ5ZPG4mOBFkR6THIJTCpTuaUo0pRpSjSlGlKNKUaUo0pRpSj9n7Ph0pSHyTkan4koloyTfpdGCqFPiXExNySpTqGTbobFSbtGyQGXfST9yoRu1bJAZVy4VIkmUxzlAVKhqhzSxxe8DYk5f1DjjnfJdTmIa+WKKWjqdjWOv+JIyCGVg7k4t7C8ZLq5ayo4JBOUFiMnbsx00hKoAB2aUp5IDlri6V4kM+aU6zuePsOr4fVzi+b3qu+zrtB0NKDVsYOJatRL6bAkivDplWSQQcL94VUnSYerSlYfHrlxR+Q9ku1JjaTlPGd7oMDRrfNUvLNai6/OLUzJLeXcUm2Rh4Gw2mIeRE2EC8TAviiO0FW5iLIpjtupUqtKUaUo0pRpSjSlGlKNKUaUo0pQI7aUo0pUDa1zSwbyKNyix9QMcZEzcbjo+qcLdKxH1GsOIvKPvUD5zGOMWr2y0Q8DdIdo/gHaKrpysxb+JZKd0ZUpSnFSnN408rK5yad5gi4fGeXMYzWD761xreIjK0LU45b3sXrcVal2UI/pl1vENLFjIucag7Erkh266oJmL1AOylavHPMzGuW8z2TDmOq5fbSWoTNmrViyawZVUuN4+x09RZnYogq7i3I3V6WNmW54475CEVi/aJDIA5E5TdKlS50pRpSjSlGlKNKUaUo0pRpSjSlGlKNKVFi18w8PVLk/jTiK5Vs0rlvJ0JZJxkMDCpyFUqiVerz60oR16sJnzdOvzdnhIt0vFtASWWcpNzqHBJMSHOpTcYy5b0tHPdV4jI8Z88YYsdkq2S8i1hewVDGDDHalXp1gZEtFjVdUbJtofwzefstqR8Id1HoGeu3g79J+vZSnB5T8ycN8QIqgSOVDWmQd5KvtVoFYr1Hg07HYTuLRZYOqmtEiwO/jk46lVqTsjIZSQUVArYjggFKoociZlKlbpSjSlGlKNKUaUo0pRpSjSlGlKqm8jv9
U7wg/FEf+l1n0pVrOlKNKUaUo0pRpSjSlGlKNKUaUo0pRpSjSlGlKhhJ8IKNK2p7bVs2cvWzt9PHsCkRG8rc1R1YRcHeg+GPZ1xpakotnCAcOgGaaZUCo/YwL09mlKiMh5amU6Xy8wfnShcqcsSlFrN85K5LyJX7shimQfEsmZqzDRce0iHrfFiFjscO4WjUmTgsjKGXZRjFuk1VT2HSlNfxL4BchuPuZ6llFGi4iq1kx/jrPMNki9weULbYXPMO55AkSTdFeTtVla2lGYXh42xNwkXwsfEqprKiiiU7cALpSn05LY65tZwyZhiFsuDsVXXjDWK5Rr3kvGsVnxxT3t6z0yfsZRSItq8njaV94sO4ylGZX0exSFspPv00zPSkRSKiZSrYy/Uh2AXsD1Q9Bez0B2B2BpSudKUaUo0pRpSjSlGlKNKUaUo0pUZswcW6rmezNrTNZQ5G0101jEYskXifkBk3F9cVSRVVVK6cV+nz8ZFryRxVEDuDJiqcoFAR2KGlKrj5u+VBcs1Y4thMH8o88Q1/d42o2LIOFyTc61kKuva3A5ahcizS0rcck0S+5BYysks1M4WXaP0heKsGSKwCgkBAUpNci/Lqz3kzP1vvbFnQMhzljkuOrrFXJ243qXqWQOPLLECkMra0l8U1WspU3J0nZH7F8+bKqC2R76TMkqmVJJMAUqaPIPFnJO8XTD2bqVTsayF84xZ0vstQ6FLX2TiYjK+G75jJ7QJNxL2gKs9Ci39JxMndIJeEkGZCswIZQ3fj0KU63CzBtuwFhRSuZDfQj3I13yVlrM2QCVhZ24q0Va8v5CsF8kK9W3L9Fs9dRFcSmUmSa6qaZ3JkDLCUvX0gpUs9KUaUo0pRpSjSlGlKNKUaUo0pTRZjw3DZrrzGtzdwyrS2zCUTlU5LEmTbfiyeXVTRVRBo9nKZJxci8jTlVExm6hxSMcCmENwDSlRMu3l3V6yVqBr8DyX5ZVpxXsl1XKjKancuLZncjYaY2kkq81PFZ2ick10sQyfSPjgQIzKBnzdBcRE6KYgpUe6DwW5M0zy12HCuRymhcrte73KQGULtPSsSwJVcG3vLUlYckMaY4q1MgAl5qUx86cNkUXKBDA+llh78EkUy6UqZHJLFGasiYgypxzwqnQ8b0u1caLRRaNkN6+dOpKrZEWK0rddqq1JJCrsRoitPOuVaQI5M5bHApE25uwwKVH3y++IF5435Ay5bneMsf8AH3H14o+K6uwwvQcm2jMCEjd6GnPoTuVpK32yJinsWeciZNnGtotADpkQYAsqPfKGEVKtO0pRpSjSlGlKNKUaUo0pRpSjSlaKzwDe1V2brTt/NRbadjHkWvJVuYfV+fYpPUDoHdQ85GLN5GJkUSn6knCByKpHADFEBDSlQodeX/TDQtni4zkPzNYOrLUbNUAlJDlLlS2+yULRDuYR1LMIW4zU7XVplg2dmO0WctFwQXAFCl6gAQUpjeHnBHM3EXO3KK/oZrsWW6Dc8QYKoeFqrdSY5gzKyGJKlZIpBK1p0LGNSThW0S5eoM2SzYyvfNXLhZyRRYpDaUp/uLODctcY+O2NaWmzqF8zBbsmr5H5NWSQsEhGMH9ny1b5C35pt9cekh3TqdfQC8oLWFZuE2xHLRogkdREpQ2UqDeCfLXyRhzPlCdQ1VxNWabjTlbnPkkvyRgZ6RUzflel5haXhZDBVqrpoFv4eLZSt1TI/XWlXUco2iUFGzVNyc59KVeZpSjSlGlKNKUaUo0pRpSjSlGlKNKVDOx8I6PZrXL253mrl3HO5mYWml4ev8q80QFYaLLOPEGZRVci7U2i4uIIb1StUUyJET9UAANKVEWS8tTJdf5eYJz5j/lHlp7Sq9nnL+a8rVW4fFa+ci5v2NnFPYw1el/isPbJuKcIpNodRKQlVDsIcuzRRNUoG0pUr8e4QybDcj+VvJm3N6vK2u4VyoYp48Qac07UaQuJ6DCu51NnPSBosqkG+v8AlSdePZEiBHBUW6DX1lDJgAKVC3mz5cWcuWFfNlOu55ueJ88XCv8AGWGuWMIiXoNiw1WWuL8p1LJF0YUqxW3FEzc2hUZRm8fIqIKM0Zh+zaC7RBMBTKpVzka1XYxzBk5kHUs5ZsmrVxKviNE3smu3QTSVkHibBszYkdPFCCooCKKSQHMIEIUuxQUrN0pRpSjSlGlKNKUaUo0pRpSqb/JQyZjiC8qzhTFTmQKTDSjPEp03cbK2uBj37VT3usw9Dhm7fouED7fAYoDpSrSfjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF+FPHH3cVn7aaUo+OLEX4U8cfdxWftppSj44sRfhTxx93FZ+2mlKPjixF
# **In this project, we will carry out an Exploratory Data Analysis (EDA) to explore the relationship between the hubs (strategic locations for redistributing goods) of the company Loggi and its delivery points located in the Distrito Federal (DF).** # --- # ## **Table of Contents** # 1. Context # 1.1. The Project # 1.2. The Company # 1.3. The Data # 2. Packages and libraries # 3. Data Exploration # # 3.1. Data Collection # 3.2. Wrangling # 3.3. Geocoding # 3.4. Quality # 4. Visualization # 5. Insight # --- # # **1. Context** # ## **1.1. The Project** # This project analyzes a dataset containing the deliveries made by Loggi in the Brasília region. # We will examine the relationship between the distance from the hubs and their deliveries, as well as vehicle capacity. # After the analysis, we will discuss whether and how these relationships can be optimized. # ## **1.2. The Company** # Loggi is one of the largest private carriers in Brazil, a logistics company that operates mainly in the e-commerce sector. # ## **1.3. The Data** # The data are synthesized from public sources (IBGE, IPEA, etc.) and are representative of the challenges the startup faces day to day, especially with regard to its scale. # # **2\. Packages and libraries** # * import json - Encodes and decodes data in JSON format # # * import pandas as pd - Manipulates and analyzes tabular data # * import geopy - Geocoding in Python # * from geopy.geocoders import Nominatim - Converts addresses into geographic coordinates and vice versa # * from geopy.extra.rate_limiter import RateLimiter - Limits how often a function can be called # # * import numpy as np - Provides support for array operations and mathematical functions # # * import geopandas - Adds geospatial functionality to pandas # # * import matplotlib.pyplot as plt - Generates plots # # * import seaborn as sns - Generates plots # # # # **3. Data Exploration** # ## **3.1. Data Collection** # **Download the data as a JSON file:** # **Load the file's data into a Python dictionary:** import json import pandas as pd with open("deliveries.json", mode="r", encoding="utf8") as file: data = json.load(file) # **Getting to know some of the data:** # The number of rows len(data) # The column names nomecol = data[0] print(nomecol.keys()) # ## **3.2. Wrangling** # **Create a pandas dataframe to make visualization and manipulation easier:** # entregas_df = pd.DataFrame(data) entregas_df.head() # **Normalizing the origin column** # The origin column holds nested data, so we normalize it: # 1. In a new dataframe, turn each key of the origin column into a new column *(normalize)* # 2. Join the new dataframe with the new columns to the main dataframe # 3. Remove the origin column # 4. Reorder the columns hub_origem_df = pd.json_normalize(entregas_df["origin"]) entregas_df = pd.merge( left=entregas_df, right=hub_origem_df, how="inner", left_index=True, right_index=True, ) entregas_df = entregas_df.drop("origin", axis=1) entregas_df = entregas_df[ ["name", "region", "lng", "lat", "vehicle_capacity", "deliveries"] ] entregas_df.head() # # **Normalizing the deliveries column** # The deliveries column also holds nested data, so we normalize it: # 1.
In a new dataframe, turn each element of the list contained in the column into its own row *(explode)*. # 2. Create a dataframe for each column and then concatenate them, i.e. combine the 3 dataframes into a new dataframe. (The ID column is not relevant for the analyses in this project) # 3. Remove the deliveries column # 4. Combine the resulting dataframe with the main dataframe # entregas_exploded_df = entregas_df[["deliveries"]].explode("deliveries") entregas_normalized_df = pd.concat( [ pd.DataFrame( entregas_exploded_df["deliveries"].apply(lambda record: record["size"]) ).rename(columns={"deliveries": "delivery_size"}), pd.DataFrame( entregas_exploded_df["deliveries"].apply( lambda record: record["point"]["lng"] ) ).rename(columns={"deliveries": "delivery_lng"}), pd.DataFrame( entregas_exploded_df["deliveries"].apply( lambda record: record["point"]["lat"] ) ).rename(columns={"deliveries": "delivery_lat"}), ], axis=1, ) entregas_df = entregas_df.drop("deliveries", axis=1) entregas_df = pd.merge( left=entregas_df, right=entregas_normalized_df, how="right", left_index=True, right_index=True, ) entregas_df.reset_index(inplace=True, drop=True) entregas_df.head() # **Let's check some information about our dataframe:** entregas_df.info() # **Check for missing data** entregas_df.isna().any() # **There is no missing data** # ## **3.3. Geocoding** # Geocoding is the process of turning a location described as text (an address) into its corresponding geographic coordinates (latitude/longitude). There is also reverse geocoding, which does the opposite, and that is what we will use here. We will use a free geocoding service, called Nominatim, through the Geopy package (a short forward-geocoding sketch follows below). # ### **3.3.1. Reverse Geocoding of the Hubs** # We will process the geographic coordinates to obtain textual address information through reverse geocoding of the hubs.
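# A quick sketch of the forward direction described above (address → coordinates), using the same free Nominatim service; the address string below is only an illustrative example and is not part of the original analysis:
from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent="ebac_geocoder")
# forward geocoding: address -> coordinates (Nominatim returns None when it cannot resolve the address)
location = geolocator.geocode("Esplanada dos Ministérios, Brasília, DF")
if location is not None:
    print(location.address)
    print((location.latitude, location.longitude))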
# To do this, we will extract the hubs' location data # applying geocoding to the coordinates of each region and extracting city and suburb information # Extract the data from the region, lat and lng columns into a new dataframe hub_df = entregas_df[["region", "lat", "lng"]] hub_df = hub_df.drop_duplicates().sort_values(by="region").reset_index(drop=True) hub_df.head() import geopy from geopy.geocoders import Nominatim from geopy.extra.rate_limiter import RateLimiter geolocator = Nominatim(user_agent="ebac_geocoder") geocoder = RateLimiter(geolocator.reverse, min_delay_seconds=1) # Create the coordinates column, a string with each hub's latitude and longitude # Create the geodata column, which receives the coordinates column with the geocoder function applied hub_df["coordinates"] = hub_df["lat"].astype(str) + ", " + hub_df["lng"].astype(str) hub_df["geodata"] = hub_df["coordinates"].apply(geocoder) hub_df.head() # Normalize the geodata column hub_geodata_df = pd.json_normalize(hub_df["geodata"].apply(lambda data: data.raw)) hub_geodata_df.head() import numpy as np # Extract the generated columns of interest # Rename the columns # Store in the "hub_city" column the "hub_city" data; # if missing, store the "hub_town" data instead # Store in the "hub_suburb" column the "hub_suburb" data; # if missing, store the "hub_city" data instead # Remove the "hub_town" column hub_geodata_df = hub_geodata_df[["address.town", "address.suburb", "address.city"]] hub_geodata_df.rename( columns={ "address.town": "hub_town", "address.suburb": "hub_suburb", "address.city": "hub_city", }, inplace=True, ) hub_geodata_df["hub_city"] = np.where( hub_geodata_df["hub_city"].notna(), hub_geodata_df["hub_city"], hub_geodata_df["hub_town"], ) hub_geodata_df["hub_suburb"] = np.where( hub_geodata_df["hub_suburb"].notna(), hub_geodata_df["hub_suburb"], hub_geodata_df["hub_city"], ) hub_geodata_df = hub_geodata_df.drop("hub_town", axis=1) hub_geodata_df.head() # Combine the "hub_geodata_df" dataframe (which holds cities and suburbs) # with the "hub_df" dataframe (which holds the regions) # Extract the data from the columns: region, hub_suburb and hub_city # Combine the main dataframe "entregas_df" with the new "hub_df" dataframe # Reorder the columns hub_df = pd.merge(left=hub_df, right=hub_geodata_df, left_index=True, right_index=True) hub_df = hub_df[["region", "hub_suburb", "hub_city"]] entregas_df = pd.merge(left=entregas_df, right=hub_df, how="inner", on="region") entregas_df = entregas_df[ [ "name", "region", "lng", "lat", "hub_city", "hub_suburb", "vehicle_capacity", "delivery_size", "delivery_lng", "delivery_lat", ] ] entregas_df.head() # ### **3.3.2 Reverse Geocoding of the Deliveries** # Since the deliveries cover more than 600,000 locations, we will download a file that already contains the geocoded data, load it into a dataframe, and combine its "delivery_city" and "delivery_suburb" columns with our main dataframe.
# Download the deliveries' geolocation data # Load the downloaded file deliveries_geodata_df = pd.read_csv("deliveries-geodata.csv") # Combine with the main dataframe and extract the "delivery_city" and "delivery_suburb" columns deliveries_df = pd.merge( left=entregas_df, right=deliveries_geodata_df[["delivery_city", "delivery_suburb"]], how="inner", left_index=True, right_index=True, ) deliveries_df.head() # ## **3.4 - Quality** # **Let's look at some of the data and check the quality of our material** # Check the dataframe information deliveries_df.info() # Check for null values deliveries_df.isna().any() # **Checking the deliveries by city and suburb of Brasília** # Check the percentage of null values in the city column of the deliveries 100 * (deliveries_df["delivery_city"].isna().sum() / len(deliveries_df)) # Check the percentage of null values in the suburb column of the deliveries 100 * (deliveries_df["delivery_suburb"].isna().sum() / len(deliveries_df)) # Check the deliveries across the cities of Brasília prop_df = deliveries_df[["delivery_city"]].value_counts() / len(deliveries_df) prop_df.sort_values(ascending=False).head(10) # Check the deliveries across the suburbs of Brasília prop_df = deliveries_df[["delivery_suburb"]].value_counts() / len(deliveries_df) prop_df.sort_values(ascending=False).head(10) # # **4. Visualization** # **Installing and importing GeoPandas** # GeoPandas adds geospatial functionality to the Python pandas package and will help us visualize the coordinates of the hubs and the deliveries on the map of the Distrito Federal, segmented by hub region. # # Install the geopandas package import geopandas # **Let's download the map data for the Distrito Federal from the official IBGE website and build a dataframe** mapa = geopandas.read_file("distrito-federal.shp") mapa = mapa.loc[[0]] mapa.head() # **Create a dataframe for the hubs with geolocation information** hub_df = ( deliveries_df[["region", "lng", "lat"]].drop_duplicates().reset_index(drop=True) ) geo_hub_df = geopandas.GeoDataFrame( hub_df, geometry=geopandas.points_from_xy(hub_df["lng"], hub_df["lat"]) ) geo_hub_df.head() # **Create a dataframe for the deliveries with geolocation information** # Create the deliveries dataframe geo_deliveries_df = geopandas.GeoDataFrame( deliveries_df, geometry=geopandas.points_from_xy( deliveries_df["delivery_lng"], deliveries_df["delivery_lat"] ), ) geo_deliveries_df.head() # ## **4.1. Map of the hubs and the deliveries** import matplotlib.pyplot as plt # create the empty plot fig, ax = plt.subplots(figsize=(25 / 2.54, 25 / 2.54)) # plot the Distrito Federal map mapa.plot(ax=ax, alpha=0.4, color="lightgrey") # plot the deliveries geo_deliveries_df.query("region == 'df-0'").plot( ax=ax, markersize=1, color="sandybrown", label="df-0" ) geo_deliveries_df.query("region == 'df-1'").plot( ax=ax, markersize=1, color="darkred", label="df-1" ) geo_deliveries_df.query("region == 'df-2'").plot( ax=ax, markersize=1, color="firebrick", label="df-2" ) # plot the hubs geo_hub_df.plot(ax=ax, markersize=30, marker="x", color="black", label="hub") # plot the legend plt.title("Entregas no Distrito Federal por Região", fontdict={"fontsize": 14}) lgnd = plt.legend(prop={"size": 14}) for handle in lgnd.legendHandles: handle.set_sizes([50]) # According to the map, the three hubs appear to be well located relative to the delivery routes. The hub in the central region (df-1) has a large share of its deliveries concentrated around it. In region df-2 the deliveries start to spread out.
And in region df-0, perhaps because of a lower concentration of people, there is the widest spacing between delivery points and the greatest distance from that region's hub. # ## **4.2. Chart of deliveries per region** # We will build a bar chart with the percentage of deliveries per region; but first we extract the columns of interest into a new dataframe, count how many times each region appears and, using the normalize argument, turn those counts into relative proportions, which gives us a percentage. # Extract the region and vehicle capacity columns # Count the number of occurrences of each unique combination of "region" and "vehicle_capacity", create a new column # and normalize the results so that they represent percentages instead of absolute counts. data = pd.DataFrame( deliveries_df[["region", "vehicle_capacity"]].value_counts(normalize=True) ).reset_index() # Rename the new column, named 0, to "region_percent" data.rename(columns={0: "region_percent"}, inplace=True) data.head() # Visualize the chart with the seaborn package import seaborn as sns with sns.axes_style("whitegrid"): grafico = sns.barplot( data=data, x="region", y="region_percent", errorbar=None, palette="rocket" ) grafico.set( title="Proporção de entregas por região", xlabel="Região", ylabel="Proporção" )
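# The project goal in section 1.1 also mentions the relationship between the deliveries and their distance to the hubs. A minimal sketch of that calculation, assuming the deliveries_df built above (hub coordinates in lat/lng, delivery coordinates in delivery_lat/delivery_lng); the haversine helper below is an illustrative addition, not part of the original notebook:
import numpy as np

def haversine_km(lat1, lng1, lat2, lng2):
    # great-circle distance between two (lat, lng) pairs, in kilometres
    lat1, lng1, lat2, lng2 = map(np.radians, [lat1, lng1, lat2, lng2])
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lng2 - lng1) / 2) ** 2
    return 2 * 6371.0 * np.arcsin(np.sqrt(a))

# distance from each delivery point to its hub, then summarised per region
deliveries_df["hub_distance_km"] = haversine_km(
    deliveries_df["lat"], deliveries_df["lng"],
    deliveries_df["delivery_lat"], deliveries_df["delivery_lng"],
)
deliveries_df.groupby("region")["hub_distance_km"].describe()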
false
0
203,355
1
203,355
203,355
129423198
<jupyter_start><jupyter_text>Random Sample of NIH Chest X-ray Dataset # NIH Chest X-ray Dataset Sample --- ### National Institutes of Health Chest X-Ray Dataset Chest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available. This NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: "ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases." (*Wang et al.*) [Link to paper][30] [1]: https://openi.nlm.nih.gov/ <br> ### File contents - This is a random sample (5%) of the full dataset: - **sample.zip**: Contains 5,606 images with size 1024 x 1024 - **sample_labels.csv**: Class labels and patient data for the entire dataset - Image Index: File name - Finding Labels: Disease type (Class label) - Follow-up # - Patient ID - Patient Age - Patient Gender - View Position: X-ray orientation - OriginalImageWidth - OriginalImageHeight - OriginalImagePixelSpacing_x - OriginalImagePixelSpacing_y <br> ### Class descriptions There are 15 classes (14 diseases, and one for "No findings") in the full dataset, but since this is drastically reduced version of the full dataset, some of the classes are sparse with the labeled as "No findings" - Hernia - 13 images - Pneumonia - 62 images - Fibrosis - 84 images - Edema - 118 images - Emphysema - 127 images - Cardiomegaly - 141 images - Pleural_Thickening - 176 images - Consolidation - 226 images - Pneumothorax - 271 images - Mass - 284 images - Nodule - 313 images - Atelectasis - 508 images - Effusion - 644 images - Infiltration - 967 images - No Finding - 3044 images <br> ### Full Dataset Content [The full dataset can be found here][3]. There are 12 zip files in total and range from ~2 gb to 4 gb in size. [3]: https://www.kaggle.com/nih-chest-xrays/data <br> ### Data limitations: 1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%. 2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv) 3. Chest x-ray radiology reports are not anticipated to be publicly shared. 
Parties who use this public dataset are encouraged to share their “updated” image labels and/or new bounding boxes in their own studied later, maybe through manual annotation <br> ### Modifications to original data - Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform - CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory <br> ### Citations - Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30] - NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30] - Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31] <br> Kaggle dataset identifier: sample <jupyter_script># # Hi, Welcome to my Kernel # Outline # - EDA # - Avoid Data Imbalance using Weighted Loss # - Loading Dataset and Applying Transforms # - Define Pre-trained Model # - Train Model # - Each label Accuracy # - Plot results import os import numpy as np import pandas as pd from tqdm import tqdm from operator import itemgetter from collections import OrderedDict import cv2 from torch.nn import functional as F # from pytorch_grad_cam import GradCAM from PIL import Image import seaborn as sns import matplotlib.pyplot as plt import torch from torch import optim, nn import torch.nn.functional as F from torchvision import transforms as T from torch.utils.data import Dataset, DataLoader, random_split, ConcatDataset from torchvision.utils import make_grid # pd.options.plotting.backend = "plotly" pd.set_option("plotting.backend", "plotly") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from torch.utils.data import random_split from torch.utils.data import Subset print("Device: ", device) # param_management CSV_PATH = "../input/sample/sample_labels.csv" IMG_DIR = "../input/sample/sample/sample/images/" # # EDA data = pd.read_csv(CSV_PATH) data.head() data["Patient Gender"].value_counts().plot.bar() data["Patient Age"].apply(lambda x: int(x[1:3])).plot.hist() data["Patient Age"].apply(lambda x: int(x[1:3])).plot.box() data["View Position"].value_counts().plot.bar() pathology_list = [ "Cardiomegaly", "Emphysema", "Effusion", "Hernia", "Nodule", "Pneumothorax", "Atelectasis", "Pleural_Thickening", "Mass", "Edema", "Consolidation", "Infiltration", "Fibrosis", "Pneumonia", ] for pathology in pathology_list: data[pathology] = data["Finding Labels"].apply(lambda x: 1 if pathology in x else 0) data["No Findings"] = data["Finding Labels"].apply( lambda x: 1 if "No Finding" in x else 0 ) data = data.drop(list(data.iloc[:, 1:11].columns.values), axis=1) data.iloc[:, 1:].sum().plot.barh() data = data.drop(["No Findings"], axis=1) data.iloc[:, 1:].sum().plot.barh() data.iloc[:, 1:].mean().plot.barh() # # Avoid Data Imbalance using Weighted Loss def compute_class_freqs(labels): labels = np.array(labels) N = labels.shape[0] positive_frequencies = np.sum(labels, axis=0) / N negative_frequencies = 1 - positive_frequencies return positive_frequencies, negative_frequencies freq_pos, freq_neg = compute_class_freqs(data.iloc[:, 1:]) df = pd.DataFrame({"Class": pathology_list, "Label": "Positive", "Value": freq_pos}) df = df.append( [ {"Class": pathology_list[l], "Label": "Negative", "Value": v} 
for l, v in enumerate(freq_neg) ], ignore_index=True, ) plt.xticks(rotation=90) f = sns.barplot(x="Class", y="Value", hue="Label", data=df) pos_weights = freq_neg neg_weights = freq_pos pos_contribution = freq_pos * pos_weights neg_contribution = freq_neg * neg_weights df = pd.DataFrame( {"Class": pathology_list, "Label": "Positive", "Value": pos_contribution} ) df = df.append( [ {"Class": pathology_list[l], "Label": "Negative", "Value": v} for l, v in enumerate(neg_contribution) ], ignore_index=True, ) plt.xticks(rotation=90) f = sns.barplot(x="Class", y="Value", hue="Label", data=df) def weighted_loss(pos_weights, neg_weights, y_pred, y_true, epsilon=1e-7): loss = 0.0 for i in range(len(pos_weights)): loss_pos = -1 * torch.mean( pos_weights[i] * y_true[:, i] * torch.log(y_pred[:, i] + epsilon) ) loss_neg = -1 * torch.mean( neg_weights[i] * (1 - y_true[:, i]) * torch.log((1 - y_pred[:, i]) + epsilon) ) loss += loss_pos + loss_neg return loss # # Loading Dataset and Applying Transforms data_transform = T.Compose( [ T.RandomRotation((-20, +20)), T.Resize((224, 224)), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) class NIH_Dataset(Dataset): def __init__(self, data, img_dir, transform=None): self.data = data self.img_dir = img_dir self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, idx): img_file = self.img_dir + self.data.iloc[:, 0][idx] img = Image.open(img_file).convert("RGB") label = np.array(self.data.iloc[:, 1:].iloc[idx]) if self.transform: img = self.transform(img) return img, label trainds = NIH_Dataset(data, img_dir=IMG_DIR, transform=data_transform) labels = pathology_list # subsets define # subset1_indices = [i in index, label in enumerate(labels) if label == 'Cardiomegaly'] subset1_indices = data[data["Cardiomegaly"] == 1].index.tolist() subset1 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset1_indices, ) subset2_indices = data[data["Emphysema"] == 1].index.tolist() subset2 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset2_indices, ) subset3_indices = data[data["Effusion"] == 1].index.tolist() subset3 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset3_indices, ) subset4_indices = data[data["Hernia"] == 1].index.tolist() subset4 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset4_indices, ) subset5_indices = data[data["Nodule"] == 1].index.tolist() subset5 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset5_indices, ) subset6_indices = data[data["Pneumothorax"] == 1].index.tolist() subset6 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset6_indices, ) subset7_indices = data[data["Atelectasis"] == 1].index.tolist() subset7 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset7_indices, ) subset8_indices = data[data["Pleural_Thickening"] == 1].index.tolist() subset8 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset8_indices, ) subset9_indices = data[data["Mass"] == 1].index.tolist() subset9 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset9_indices, ) subset10_indices = data[data["Edema"] == 1].index.tolist() subset10 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset10_indices, ) 
subset11_indices = data[data["Consolidation"] == 1].index.tolist() subset11 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset11_indices, ) subset12_indices = data[data["Infiltration"] == 1].index.tolist() subset12 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset12_indices, ) subset13_indices = data[data["Fibrosis"] == 1].index.tolist() subset13 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset13_indices, ) subset14_indices = data[data["Pneumonia"] == 1].index.tolist() subset14 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset14_indices, ) print(subset1.__len__()) print(subset2.__len__()) print(subset3.__len__()) print(subset4.__len__()) print(subset5.__len__()) print(subset6.__len__()) print(subset7.__len__()) print(subset8.__len__()) print(subset9.__len__()) print(subset10.__len__()) print(subset11.__len__()) print(subset12.__len__()) print(subset13.__len__()) print(subset14.__len__()) print(int(len(subset14) * 0.7)) print(int(len(subset14) * 0.3)) trainset1, valset1, testset1 = random_split(subset1, [99, 28, 14]) trainset2, valset2, testset2 = random_split(subset2, [89, 26, 12]) trainset3, valset3, testset3 = random_split(subset3, [451, 129, 64]) trainset4, valset4, testset4 = random_split(subset4, [10, 2, 1]) trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31]) trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27]) trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51]) trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18]) trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31]) trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27]) trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51]) trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18]) trainset9, valset9, testset9 = random_split(subset9, [200, 56, 28]) trainset10, valset10, testset10 = random_split(subset10, [83, 23, 12]) trainset11, valset11, testset11 = random_split(subset11, [159, 45, 22]) trainset12, valset12, testset12 = random_split(subset12, [677, 193, 97]) trainset13, valset13, testset13 = random_split(subset13, [59, 17, 8]) trainset14, valset14, testset14 = random_split(subset14, [43, 12, 7]) trainset = ConcatDataset( [ trainset1, trainset2, trainset3, trainset4, trainset5, trainset6, trainset7, trainset8, trainset9, trainset10, trainset11, trainset12, trainset13, trainset14, ] ) validset = ConcatDataset( [ valset1, valset2, valset3, valset4, valset5, valset6, valset7, valset8, valset9, valset10, valset11, valset12, valset13, valset14, ] ) testset = ConcatDataset( [ testset1, testset2, testset3, testset4, testset5, testset6, testset7, testset8, testset9, testset10, testset11, testset12, testset13, testset14, ] ) def deprocess(img): img = img.permute(1, 2, 0) img = img * torch.Tensor([0.229, 0.224, 0.225]) + torch.Tensor( [0.485, 0.456, 0.406] ) return img image, label = trainds[0] class_labels = list(np.where(label == 1)[0]) plt.imshow(deprocess(image)) plt.title(itemgetter(*class_labels)(pathology_list)) # # Split Dataset and create dataloaders trainset, validset, testset = random_split(trainds, [5000, 303, 303]) print("Length of trainset : {}".format(len(trainset))) print("Length of testset : {}".format(len(testset))) print("Length of validset : {}".format(len(validset))) trainloader = DataLoader(trainset, batch_size=32, shuffle=True) validloader 
= DataLoader(validset, batch_size=32, shuffle=False) testloader = DataLoader(testset, batch_size=32, shuffle=False) # # Define Pre-trained Model from torchvision import models model = models.resnet50() model.load_state_dict( torch.load("/kaggle/input/pretrained-model-weights-pytorch/resnet50-19c8e357.pth") ) # optionally resume from a checkpoint saved by an earlier run of this notebook if os.path.exists("/kaggle/working/Res50_epoch10.pt"): model = torch.load("/kaggle/working/Res50_epoch10.pt") for param in model.parameters(): param.requires_grad = False model.fc = nn.Sequential(nn.Linear(2048, 14), nn.Sigmoid()) model.to(device) # # Train Model # the learning rate must be defined before building the optimizer that uses it epochs = 5 valid_loss_min = np.Inf # epochs = 100 Learning_Rate = 0.01 optimizer = optim.Adam(model.parameters(), lr=Learning_Rate) schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4) for i in range(epochs): train_loss = 0.0 valid_loss = 0.0 train_acc = 0.0 valid_acc = 0.0 model.train() for images, labels in tqdm(trainloader): images = images.to(device) labels = labels.to(device) ps = model(images) loss = weighted_loss(pos_weights, neg_weights, ps, labels) optimizer.zero_grad() loss.backward() optimizer.step() train_loss += loss.item() avg_train_loss = train_loss / len(trainloader) model.eval() with torch.no_grad(): for images, labels in tqdm(validloader): images = images.to(device) labels = labels.to(device) ps = model(images) loss = weighted_loss(pos_weights, neg_weights, ps, labels) valid_loss += loss.item() avg_valid_loss = valid_loss / len(validloader) schedular.step(avg_valid_loss) if avg_valid_loss <= valid_loss_min: print( "Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format( valid_loss_min, avg_valid_loss ) ) torch.save(model, "Res50_epoch10.pt") valid_loss_min = avg_valid_loss print("Epoch : {} Train Loss : {:.6f} ".format(i + 1, avg_train_loss)) print("Epoch : {} Valid Loss : {:.6f} ".format(i + 1, avg_valid_loss)) # # Each Class Accuracy def class_accuracy(dataloader, model): per_class_accuracy = [0 for i in range(len(pathology_list))] total = 0.0 with torch.no_grad(): for images, labels in dataloader: ps = model(images.to(device)) labels = labels.to(device) ps = (ps >= 0.5).float() for i in range(ps.shape[1]): x1 = ps[:, i : i + 1] x2 = labels[:, i : i + 1] per_class_accuracy[i] += int((x1 == x2).sum()) per_class_accuracy = [ (i / len(dataloader.dataset)) * 100.0 for i in per_class_accuracy ] return per_class_accuracy def get_acc_data(class_names, acc_list): df = pd.DataFrame(list(zip(class_names, acc_list)), columns=["Labels", "Acc"]) return df print("Train Dataset Accuracy Report") acc_list = class_accuracy(trainloader, model) get_acc_data(pathology_list, acc_list) print("Test Dataset Accuracy Report") acc_list = class_accuracy(testloader, model) get_acc_data(pathology_list, acc_list) print("Valid Dataset Accuracy Report") acc_list = class_accuracy(validloader, model) get_acc_data(pathology_list, acc_list) from sklearn.metrics import roc_auc_score, roc_curve def get_roc_curve(labels, preds, class_names): plt.figure(figsize=(15, 10)) plt.title("Receiver Operating Characteristic") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") for i in range(len(class_names)): fpr, tpr, thresholds = roc_curve(labels[:, i], preds[:, i]) plt.plot(fpr, tpr, label=class_names[i]) plt.legend(loc="best") plt.show() def get_roc_auc_score(labels, preds): roc_auc_scores = [] for i in range(len(pathology_list)): roc_auc_scores.append(roc_auc_score(labels[:, i], preds[:, i])) return
roc_auc_scores def get_roc_auc_data(class_names, roc_auc_scores): df = pd.DataFrame( list(zip(class_names, roc_auc_scores)), columns=["Labels", "ROC AUC Score"] ) return df def get_roc_data(labels, preds, class_names): get_roc_curve(labels, preds, class_names) roc_auc_scores = get_roc_auc_score(labels, preds) return get_roc_auc_data(class_names, roc_auc_scores) def get_roc_data_for_dataset(dataloader, model, class_names): labels = [] preds = [] with torch.no_grad(): for images, labels_batch in dataloader: labels_batch = labels_batch.numpy() labels.append(labels_batch) ps = model(images.to(device)) ps = ps.cpu().numpy() preds.append(ps) labels = np.concatenate(labels) preds = np.concatenate(preds) return get_roc_data(labels, preds, class_names) print("Train Dataset ROC AUC Report") get_roc_data_for_dataset(trainloader, model, pathology_list) print("Test Dataset ROC AUC Report") get_roc_data_for_dataset(testloader, model, pathology_list) print("Valid Dataset ROC AUC Report") get_roc_data_for_dataset(validloader, model, pathology_list) # GradCAM def get_gradcam(img, model, class_names, layer_name): gradcam = GradCAM.from_config( model_type="resnet", arch=model, layer_name=layer_name ) img = np.float32(img) / 255 input = preprocess_image(img) input = input.unsqueeze(0) model.to(device) model.eval() input = input.to(device) target_index = None mask = gradcam(input, target_index) heatmap, result = visualize_cam(mask, img) return heatmap, result def plot_heatmap(img, heatmap, result, class_names): plt.figure(figsize=(10, 10)) ax = sns.heatmap(heatmap, cmap="jet") ax2 = plt.axes([0, 0, 1, 1], frameon=False) plt.axis("off") plt.imshow(img) plt.savefig("heatmap.png") plt.show() plt.figure(figsize=(10, 10)) plt.imshow(result) plt.savefig("result.png") plt.show() def get_gradcam_for_dataset(dataloader, model, class_names, layer_name): images, labels = next(iter(dataloader)) images = images.numpy() labels = labels.numpy() idx = np.random.randint(0, len(images)) img = images[idx] img = np.transpose(img, (1, 2, 0)) heatmap, result = get_gradcam(img, model, class_names, layer_name) plot_heatmap(img, heatmap, result, class_names) get_gradcam_for_dataset(trainloader, model, pathology_list, "layer4") get_gradcam_for_dataset(testloader, model, pathology_list, "layer4") get_gradcam_for_dataset(validloader, model, pathology_list, "layer4") # # Plot Results def view_classify(img, ps, label): class_name = pathology_list classes = np.array(class_name) ps = ps.cpu().data.numpy().squeeze() img = deprocess(img) class_labels = list(np.where(label == 1)[0]) if not class_labels: title = "No Findings" else: title = itemgetter(*class_labels)(class_name) fig, (ax1, ax2) = plt.subplots(figsize=(8, 12), ncols=2) ax1.imshow(img) ax1.set_title("Ground Truth : {}".format(title)) ax1.axis("off") ax2.barh(classes, ps) ax2.set_aspect(0.1) ax2.set_yticks(classes) ax2.set_yticklabels(classes) ax2.set_title("Predicted Class") ax2.set_xlim(0, 1.1) plt.tight_layout() return None image, label = testset[33] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label) image, label = trainset[999] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label) image, label = validset[234] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label)
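# The Grad-CAM cells above call GradCAM.from_config, preprocess_image and visualize_cam, but the corresponding import is commented out at the top of this notebook, so those helpers are undefined here. Below is a minimal, assumption-based Grad-CAM sketch that uses only torch, cv2 and matplotlib (already imported) and assumes the torchvision ResNet-50 defined above, so that model.layer4 is its last convolutional stage; it is an illustrative stand-in, not the original helpers.

def gradcam_heatmap(model, image, target_class, target_layer):
    # capture the target layer's activation during the forward pass and keep its gradient
    feats = {}

    def fwd_hook(module, inputs, output):
        output.retain_grad()  # non-leaf tensor: keep its grad after backward
        feats["act"] = output

    handle = target_layer.register_forward_hook(fwd_hook)
    model.eval()
    # requires_grad on the input forces autograd to track the frozen backbone as well
    inp = image.unsqueeze(0).to(device).requires_grad_(True)
    score = model(inp)[0, target_class]
    model.zero_grad()
    score.backward()
    handle.remove()
    acts = feats["act"][0]            # (C, h, w) feature maps
    grads = feats["act"].grad[0]      # (C, h, w) gradients of the class score
    weights = grads.mean(dim=(1, 2))  # channel weights = global-average-pooled gradients
    cam = torch.relu((weights[:, None, None] * acts).sum(dim=0))
    cam = cam / (cam.max() + 1e-8)    # normalise to [0, 1]
    return cam.detach().cpu().numpy()

# usage: overlay the heatmap for the highest-scoring class of one test image
image, label = testset[0]
with torch.no_grad():
    probs = model(image.unsqueeze(0).to(device))[0]
top_class = int(probs.argmax())
cam = gradcam_heatmap(model, image, top_class, model.layer4)
plt.imshow(deprocess(image))
plt.imshow(cv2.resize(cam, (224, 224)), cmap="jet", alpha=0.4)
plt.title("Grad-CAM: {}".format(pathology_list[top_class]))
plt.axis("off")
plt.show()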
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423198.ipynb
sample
null
[{"Id": 129423198, "ScriptId": 38191677, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9915414, "CreationDate": "05/13/2023 16:57:29", "VersionNumber": 2.0, "Title": "Multi-label Chest X-Ray classification", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 582.0, "LinesInsertedFromPrevious": 289.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 293.0, "LinesInsertedFromFork": 289.0, "LinesDeletedFromFork": 38.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 293.0, "TotalVotes": 0}]
[{"Id": 185460457, "KernelVersionId": 129423198, "SourceDatasetVersionId": 7773}, {"Id": 185460458, "KernelVersionId": 129423198, "SourceDatasetVersionId": 791155}]
[{"Id": 7773, "DatasetId": 4667, "DatasourceVersionId": 7773, "CreatorUserId": 484516, "LicenseName": "CC0: Public Domain", "CreationDate": "11/23/2017 02:58:24", "VersionNumber": 4.0, "Title": "Random Sample of NIH Chest X-ray Dataset", "Slug": "sample", "Subtitle": "5,606 images and labels sampled from the NIH Chest X-ray Dataset", "Description": "# NIH Chest X-ray Dataset Sample\n\n---\n\n### National Institutes of Health Chest X-Ray Dataset\n\nChest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.\n\nThis NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: \"ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases.\" (*Wang et al.*)\n\n[Link to paper][30]\n\n[1]: https://openi.nlm.nih.gov/\n\n\n<br>\n### File contents - This is a random sample (5%) of the full dataset:\n\n- **sample.zip**: Contains 5,606 images with size 1024 x 1024\n\n- **sample_labels.csv**: Class labels and patient data for the entire dataset\n - Image Index: File name\n - Finding Labels: Disease type (Class label)\n - Follow-up # \n - Patient ID\n - Patient Age\n - Patient Gender\n - View Position: X-ray orientation\n - OriginalImageWidth\n - OriginalImageHeight\n - OriginalImagePixelSpacing_x\n - OriginalImagePixelSpacing_y\n \n\n\n\n\n<br>\n### Class descriptions\n\nThere are 15 classes (14 diseases, and one for \"No findings\") in the full dataset, but since this is drastically reduced version of the full dataset, some of the classes are sparse with the labeled as \"No findings\"\n\n- Hernia - 13 images\n- Pneumonia - 62 images\n- Fibrosis - 84 images\n- Edema - 118 images\n- Emphysema - 127 images\n- Cardiomegaly - 141 images\n- Pleural_Thickening - 176 images\n- Consolidation - 226 images\n- Pneumothorax - 271 images\n- Mass - 284 images\n- Nodule - 313 images\n- Atelectasis - 508 images\n- Effusion - 644 images\n- Infiltration - 967 images\n- No Finding - 3044 images\n\n<br>\n### Full Dataset Content\n\n[The full dataset can be found here][3]. There are 12 zip files in total and range from ~2 gb to 4 gb in size. \n\n\n[3]: https://www.kaggle.com/nih-chest-xrays/data\n\n\n<br>\n### Data limitations: \n\n1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%. \n2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv)\n3. 
Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their \u201cupdated\u201d image labels and/or new bounding boxes in their own studied later, maybe through manual annotation\n\n\n<br>\n### Modifications to original data\n\n- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform\n\n- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory\n\n<br>\n### Citations\n\n- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]\n\n- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]\n\n- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]\n\n<br>\n### Acknowledgements\n\nThis work was supported by the Intramural Research Program of the NClinical Center (clinicalcenter.nih.gov) and National Library of Medicine (www.nlm.nih.gov). \n\n\n [30]: https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community\n\n [31]: https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345", "VersionNotes": "Simplified the ZIP file", "TotalCompressedBytes": 2253119529.0, "TotalUncompressedBytes": 2253119529.0}]
[{"Id": 4667, "CreatorUserId": 484516, "OwnerUserId": NaN, "OwnerOrganizationId": 1146.0, "CurrentDatasetVersionId": 7773.0, "CurrentDatasourceVersionId": 7773.0, "ForumId": 10494, "Type": 2, "CreationDate": "11/15/2017 17:04:12", "LastActivityDate": "02/06/2018", "TotalViews": 90551, "TotalDownloads": 16454, "TotalVotes": 251, "TotalKernels": 47}]
null
# # Hi, Welcome to my Kernel # Outline # - EDA # - Avoid Data Imbalance using Weighted Loss # - Loading Dataset and Applying Transforms # - Define Pre-trained Model # - Train Model # - Each label Accuracy # - Plot results import os import numpy as np import pandas as pd from tqdm import tqdm from operator import itemgetter from collections import OrderedDict import cv2 from torch.nn import functional as F # from pytorch_grad_cam import GradCAM from PIL import Image import seaborn as sns import matplotlib.pyplot as plt import torch from torch import optim, nn import torch.nn.functional as F from torchvision import transforms as T from torch.utils.data import Dataset, DataLoader, random_split, ConcatDataset from torchvision.utils import make_grid # pd.options.plotting.backend = "plotly" pd.set_option("plotting.backend", "plotly") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from torch.utils.data import random_split from torch.utils.data import Subset print("Device: ", device) # param_management CSV_PATH = "../input/sample/sample_labels.csv" IMG_DIR = "../input/sample/sample/sample/images/" # # EDA data = pd.read_csv(CSV_PATH) data.head() data["Patient Gender"].value_counts().plot.bar() data["Patient Age"].apply(lambda x: int(x[1:3])).plot.hist() data["Patient Age"].apply(lambda x: int(x[1:3])).plot.box() data["View Position"].value_counts().plot.bar() pathology_list = [ "Cardiomegaly", "Emphysema", "Effusion", "Hernia", "Nodule", "Pneumothorax", "Atelectasis", "Pleural_Thickening", "Mass", "Edema", "Consolidation", "Infiltration", "Fibrosis", "Pneumonia", ] for pathology in pathology_list: data[pathology] = data["Finding Labels"].apply(lambda x: 1 if pathology in x else 0) data["No Findings"] = data["Finding Labels"].apply( lambda x: 1 if "No Finding" in x else 0 ) data = data.drop(list(data.iloc[:, 1:11].columns.values), axis=1) data.iloc[:, 1:].sum().plot.barh() data = data.drop(["No Findings"], axis=1) data.iloc[:, 1:].sum().plot.barh() data.iloc[:, 1:].mean().plot.barh() # # Avoid Data Imbalance using Weighted Loss def compute_class_freqs(labels): labels = np.array(labels) N = labels.shape[0] positive_frequencies = np.sum(labels, axis=0) / N negative_frequencies = 1 - positive_frequencies return positive_frequencies, negative_frequencies freq_pos, freq_neg = compute_class_freqs(data.iloc[:, 1:]) df = pd.DataFrame({"Class": pathology_list, "Label": "Positive", "Value": freq_pos}) df = df.append( [ {"Class": pathology_list[l], "Label": "Negative", "Value": v} for l, v in enumerate(freq_neg) ], ignore_index=True, ) plt.xticks(rotation=90) f = sns.barplot(x="Class", y="Value", hue="Label", data=df) pos_weights = freq_neg neg_weights = freq_pos pos_contribution = freq_pos * pos_weights neg_contribution = freq_neg * neg_weights df = pd.DataFrame( {"Class": pathology_list, "Label": "Positive", "Value": pos_contribution} ) df = df.append( [ {"Class": pathology_list[l], "Label": "Negative", "Value": v} for l, v in enumerate(neg_contribution) ], ignore_index=True, ) plt.xticks(rotation=90) f = sns.barplot(x="Class", y="Value", hue="Label", data=df) def weighted_loss(pos_weights, neg_weights, y_pred, y_true, epsilon=1e-7): loss = 0.0 for i in range(len(pos_weights)): loss_pos = -1 * torch.mean( pos_weights[i] * y_true[:, i] * torch.log(y_pred[:, i] + epsilon) ) loss_neg = -1 * torch.mean( neg_weights[i] * (1 - y_true[:, i]) * torch.log((1 - y_pred[:, i]) + epsilon) ) loss += loss_pos + loss_neg return loss # # Loading Dataset and Applying Transforms data_transform = 
T.Compose( [ T.RandomRotation((-20, +20)), T.Resize((224, 224)), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) class NIH_Dataset(Dataset): def __init__(self, data, img_dir, transform=None): self.data = data self.img_dir = img_dir self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, idx): img_file = self.img_dir + self.data.iloc[:, 0][idx] img = Image.open(img_file).convert("RGB") label = np.array(self.data.iloc[:, 1:].iloc[idx]) if self.transform: img = self.transform(img) return img, label trainds = NIH_Dataset(data, img_dir=IMG_DIR, transform=data_transform) labels = pathology_list # subsets define # subset1_indices = [i in index, label in enumerate(labels) if label == 'Cardiomegaly'] subset1_indices = data[data["Cardiomegaly"] == 1].index.tolist() subset1 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset1_indices, ) subset2_indices = data[data["Emphysema"] == 1].index.tolist() subset2 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset2_indices, ) subset3_indices = data[data["Effusion"] == 1].index.tolist() subset3 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset3_indices, ) subset4_indices = data[data["Hernia"] == 1].index.tolist() subset4 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset4_indices, ) subset5_indices = data[data["Nodule"] == 1].index.tolist() subset5 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset5_indices, ) subset6_indices = data[data["Pneumothorax"] == 1].index.tolist() subset6 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset6_indices, ) subset7_indices = data[data["Atelectasis"] == 1].index.tolist() subset7 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset7_indices, ) subset8_indices = data[data["Pleural_Thickening"] == 1].index.tolist() subset8 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset8_indices, ) subset9_indices = data[data["Mass"] == 1].index.tolist() subset9 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset9_indices, ) subset10_indices = data[data["Edema"] == 1].index.tolist() subset10 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset10_indices, ) subset11_indices = data[data["Consolidation"] == 1].index.tolist() subset11 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset11_indices, ) subset12_indices = data[data["Infiltration"] == 1].index.tolist() subset12 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset12_indices, ) subset13_indices = data[data["Fibrosis"] == 1].index.tolist() subset13 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset13_indices, ) subset14_indices = data[data["Pneumonia"] == 1].index.tolist() subset14 = Subset( NIH_Dataset(data, "../input/sample/sample/sample/images/", data_transform), subset14_indices, ) print(subset1.__len__()) print(subset2.__len__()) print(subset3.__len__()) print(subset4.__len__()) print(subset5.__len__()) print(subset6.__len__()) print(subset7.__len__()) print(subset8.__len__()) print(subset9.__len__()) print(subset10.__len__()) print(subset11.__len__()) print(subset12.__len__()) 
print(subset13.__len__()) print(subset14.__len__()) print(int(len(subset14) * 0.7)) print(int(len(subset14) * 0.3)) trainset1, valset1, testset1 = random_split(subset1, [99, 28, 14]) trainset2, valset2, testset2 = random_split(subset2, [89, 26, 12]) trainset3, valset3, testset3 = random_split(subset3, [451, 129, 64]) trainset4, valset4, testset4 = random_split(subset4, [10, 2, 1]) trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31]) trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27]) trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51]) trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18]) trainset5, valset5, testset5 = random_split(subset5, [220, 62, 31]) trainset6, valset6, testset6 = random_split(subset6, [190, 54, 27]) trainset7, valset7, testset7 = random_split(subset7, [356, 101, 51]) trainset8, valset8, testset8 = random_split(subset8, [123, 35, 18]) trainset9, valset9, testset9 = random_split(subset9, [200, 56, 28]) trainset10, valset10, testset10 = random_split(subset10, [83, 23, 12]) trainset11, valset11, testset11 = random_split(subset11, [159, 45, 22]) trainset12, valset12, testset12 = random_split(subset12, [677, 193, 97]) trainset13, valset13, testset13 = random_split(subset13, [59, 17, 8]) trainset14, valset14, testset14 = random_split(subset14, [43, 12, 7]) trainset = ConcatDataset( [ trainset1, trainset2, trainset3, trainset4, trainset5, trainset6, trainset7, trainset8, trainset9, trainset10, trainset11, trainset12, trainset13, trainset14, ] ) validset = ConcatDataset( [ valset1, valset2, valset3, valset4, valset5, valset6, valset7, valset8, valset9, valset10, valset11, valset12, valset13, valset14, ] ) testset = ConcatDataset( [ testset1, testset2, testset3, testset4, testset5, testset6, testset7, testset8, testset9, testset10, testset11, testset12, testset13, testset14, ] ) def deprocess(img): img = img.permute(1, 2, 0) img = img * torch.Tensor([0.229, 0.224, 0.225]) + torch.Tensor( [0.485, 0.456, 0.406] ) return img image, label = trainds[0] class_labels = list(np.where(label == 1)[0]) plt.imshow(deprocess(image)) plt.title(itemgetter(*class_labels)(pathology_list)) # # Split Dataset and create dataloaders trainset, validset, testset = random_split(trainds, [5000, 303, 303]) print("Length of trainset : {}".format(len(trainset))) print("Length of testset : {}".format(len(testset))) print("Length of validset : {}".format(len(validset))) trainloader = DataLoader(trainset, batch_size=32, shuffle=True) validloader = DataLoader(validset, batch_size=32, shuffle=False) testloader = DataLoader(testset, batch_size=32, shuffle=False) # # Define Pre-trained Model from torchvision import models model = models.resnet50() model.load_state_dict( torch.load("/kaggle/input/pretrained-model-weights-pytorch/resnet50-19c8e357.pth") ) model = torch.load("/kaggle/working/Res50_epoch10.pt") for param in model.parameters(): param.requires_grad = False model.fc = nn.Sequential(nn.Linear(2048, 14), nn.Sigmoid()) model.to(device) # # Train Model optimizer = optim.Adam(model.parameters(), lr=Learning_Rate) schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4) epochs = 5 valid_loss_min = np.Inf # epochs = 100 Learning_Rate = 0.01 optimizer = optim.Adam(model.parameters(), lr=Learning_Rate) schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=4) for i in range(epochs): train_loss = 0.0 valid_loss = 0.0 train_acc = 0.0 valid_acc = 0.0 model.train() for images, labels in 
tqdm(trainloader): images = images.to(device) labels = labels.to(device) ps = model(images) loss = weighted_loss(pos_weights, neg_weights, ps, labels) optimizer.zero_grad() loss.backward() optimizer.step() train_loss += loss.item() avg_train_loss = train_loss / len(trainloader) model.eval() with torch.no_grad(): for images, labels in tqdm(validloader): images = images.to(device) labels = labels.to(device) ps = model(images) loss = weighted_loss(pos_weights, neg_weights, ps, labels) valid_loss += loss.item() avg_valid_loss = valid_loss / len(validloader) schedular.step(avg_valid_loss) if avg_valid_loss <= valid_loss_min: print( "Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format( valid_loss_min, avg_valid_loss ) ) torch.save(model, "Res50_epoch10.pt") valid_loss_min = avg_valid_loss print("Epoch : {} Train Loss : {:.6f} ".format(i + 1, avg_train_loss)) print("Epoch : {} Valid Loss : {:.6f} ".format(i + 1, avg_valid_loss)) # # Each Class Accuracy def class_accuracy(dataloader, model): per_class_accuracy = [0 for i in range(len(pathology_list))] total = 0.0 with torch.no_grad(): for images, labels in dataloader: ps = model(images.to(device)) labels = labels.to(device) ps = (ps >= 0.5).float() for i in range(ps.shape[1]): x1 = ps[:, i : i + 1] x2 = labels[:, i : i + 1] per_class_accuracy[i] += int((x1 == x2).sum()) per_class_accuracy = [ (i / len(dataloader.dataset)) * 100.0 for i in per_class_accuracy ] return per_class_accuracy def get_acc_data(class_names, acc_list): df = pd.DataFrame(list(zip(class_names, acc_list)), columns=["Labels", "Acc"]) return df print("Train Dataset Accuracy Report") acc_list = class_accuracy(trainloader, model) get_acc_data(pathology_list, acc_list) print("Test Dataset Accuracy Report") acc_list = class_accuracy(testloader, model) get_acc_data(pathology_list, acc_list) print("Valid Dataset Accuracy Report") acc_list = class_accuracy(validloader, model) get_acc_data(pathology_list, acc_list) from sklearn.metrics import roc_auc_score, roc_curve def get_roc_curve(labels, preds, class_names): plt.figure(figsize=(15, 10)) plt.title("Receiver Operating Characteristic") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") for i in range(len(class_names)): fpr, tpr, thresholds = roc_curve(labels[:, i], preds[:, i]) plt.plot(fpr, tpr, label=class_names[i]) plt.legend(loc="best") plt.show() def get_roc_auc_score(labels, preds): roc_auc_scores = [] for i in range(len(pathology_list)): roc_auc_scores.append(roc_auc_score(labels[:, i], preds[:, i])) return roc_auc_scores def get_roc_auc_data(class_names, roc_auc_scores): df = pd.DataFrame( list(zip(class_names, roc_auc_scores)), columns=["Labels", "ROC AUC Score"] ) return df def get_roc_data(labels, preds, class_names): get_roc_curve(labels, preds, class_names) roc_auc_scores = get_roc_auc_score(labels, preds) return get_roc_auc_data(class_names, roc_auc_scores) def get_roc_data_for_dataset(dataloader, model, class_names): labels = [] preds = [] with torch.no_grad(): for images, labels_batch in dataloader: labels_batch = labels_batch.numpy() labels.append(labels_batch) ps = model(images.to(device)) ps = ps.cpu().numpy() preds.append(ps) labels = np.concatenate(labels) preds = np.concatenate(preds) return get_roc_data(labels, preds, class_names) print("Train Dataset ROC AUC Report") get_roc_data_for_dataset(trainloader, model, pathology_list) print("Test Dataset ROC AUC Report") get_roc_data_for_dataset(testloader, model, pathology_list) print("Valid Dataset ROC AUC Report") 
get_roc_data_for_dataset(validloader, model, pathology_list) # GradCAM def get_gradcam(img, model, class_names, layer_name): gradcam = GradCAM.from_config( model_type="resnet", arch=model, layer_name=layer_name ) img = np.float32(img) / 255 input = preprocess_image(img) input = input.unsqueeze(0) model.to(device) model.eval() input = input.to(device) target_index = None mask = gradcam(input, target_index) heatmap, result = visualize_cam(mask, img) return heatmap, result def plot_heatmap(img, heatmap, result, class_names): plt.figure(figsize=(10, 10)) ax = sns.heatmap(heatmap, cmap="jet") ax2 = plt.axes([0, 0, 1, 1], frameon=False) plt.axis("off") plt.imshow(img) plt.savefig("heatmap.png") plt.show() plt.figure(figsize=(10, 10)) plt.imshow(result) plt.savefig("result.png") plt.show() def get_gradcam_for_dataset(dataloader, model, class_names, layer_name): images, labels = next(iter(dataloader)) images = images.numpy() labels = labels.numpy() idx = np.random.randint(0, len(images)) img = images[idx] img = np.transpose(img, (1, 2, 0)) heatmap, result = get_gradcam(img, model, class_names, layer_name) plot_heatmap(img, heatmap, result, class_names) get_gradcam_for_dataset(trainloader, model, pathology_list, "layer4") get_gradcam_for_dataset(testloader, model, pathology_list, "layer4") get_gradcam_for_dataset(validloader, model, pathology_list, "layer4") # # Plot Results def view_classify(img, ps, label): class_name = pathology_list classes = np.array(class_name) ps = ps.cpu().data.numpy().squeeze() img = deprocess(img) class_labels = list(np.where(label == 1)[0]) if not class_labels: title = "No Findings" else: title = itemgetter(*class_labels)(class_name) fig, (ax1, ax2) = plt.subplots(figsize=(8, 12), ncols=2) ax1.imshow(img) ax1.set_title("Ground Truth : {}".format(title)) ax1.axis("off") ax2.barh(classes, ps) ax2.set_aspect(0.1) ax2.set_yticks(classes) ax2.set_yticklabels(classes) ax2.set_title("Predicted Class") ax2.set_xlim(0, 1.1) plt.tight_layout() return None image, label = testset[33] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label) image, label = trainset[999] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label) image, label = validset[234] ps = model(image.unsqueeze(0).to(device)) view_classify(image, ps, label)
false
0
6,153
0
7,407
6,153
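The chest X-ray notebook in the record above computes its class-imbalance loss with a Python loop over the 14 pathology classes. As a reading aid, here is a minimal, self-contained sketch of the same idea in vectorized form; the toy label matrix, the helper names class_frequencies and weighted_bce, and the batch size are illustrative assumptions rather than part of the original kernel, and the only dependency assumed is PyTorch.

import torch

def class_frequencies(labels: torch.Tensor):
    # labels: (N, C) binary matrix; returns per-class positive / negative rates
    pos = labels.float().mean(dim=0)
    return pos, 1.0 - pos

def weighted_bce(y_pred, y_true, pos_w, neg_w, eps=1e-7):
    # y_pred: (N, C) sigmoid outputs, y_true: (N, C) binary targets
    loss = -(
        pos_w * y_true * torch.log(y_pred + eps)
        + neg_w * (1 - y_true) * torch.log(1 - y_pred + eps)
    )
    # mean over the batch per class, then summed over classes,
    # which matches the per-class loop in the kernel above
    return loss.mean(dim=0).sum()

if __name__ == "__main__":
    torch.manual_seed(0)
    y_true = (torch.rand(8, 14) < 0.1).float()  # sparse positives, like NIH labels
    y_pred = torch.rand(8, 14)                  # stand-in for model outputs
    pos_f, neg_f = class_frequencies(y_true)
    print(weighted_bce(y_pred, y_true, pos_w=neg_f, neg_w=pos_f))

Swapping the frequencies (positive weight = negative rate, negative weight = positive rate) is what keeps rare positive findings from being drowned out by the far more common negatives.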
129423947
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer

# 1. Load and inspect the data
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")

# 2. Preprocess the data
# Identify numerical and categorical columns
num_cols = train.drop(["Id", "Class", "EJ"], axis=1).columns
cat_cols = ["EJ"]

# Build preprocessing pipeline
preprocessor = ColumnTransformer(
    transformers=[
        (
            "num",
            Pipeline(
                steps=[
                    ("imputer", SimpleImputer(strategy="mean")),
                    ("scaler", StandardScaler()),
                ]
            ),
            num_cols,
        ),
        (
            "cat",
            Pipeline(
                steps=[
                    ("imputer", SimpleImputer(strategy="most_frequent")),
                    ("encoder", OneHotEncoder()),
                ]
            ),
            cat_cols,
        ),
    ]
)

# 3. Build and train the model
X = train.drop(["Id", "Class"], axis=1)
y = train["Class"]
X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, test_size=0.2, random_state=42
)
rf = Pipeline(
    steps=[
        ("preprocessor", preprocessor),
        ("classifier", RandomForestClassifier(n_estimators=100, random_state=42)),
    ]
)
rf.fit(X_train, y_train)

# 4. Evaluate the model
y_train_pred = rf.predict(X_train)
y_valid_pred = rf.predict(X_valid)
print("Training accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_valid, y_valid_pred))

# Make predictions on the test set
X_test = test.drop("Id", axis=1)
predictions = rf.predict_proba(X_test)

# Check if the number of predictions matches the number of rows in the test set
assert len(predictions) == len(
    test
), "Number of predictions must match number of rows in test set"

# Create the submission DataFrame: one probability column per class plus the Id
submission = pd.DataFrame(predictions, columns=["class_0", "class_1"])
submission.insert(0, "Id", test["Id"])

# Save the submission DataFrame to a CSV file
submission.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423947.ipynb
null
null
[{"Id": 129423947, "ScriptId": 38481998, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11408729, "CreationDate": "05/13/2023 17:05:35", "VersionNumber": 2.0, "Title": "Age Related Conditions", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 55.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
null
null
null
null
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer

# 1. Load and inspect the data
train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")

# 2. Preprocess the data
# Identify numerical and categorical columns
num_cols = train.drop(["Id", "Class", "EJ"], axis=1).columns
cat_cols = ["EJ"]

# Build preprocessing pipeline
preprocessor = ColumnTransformer(
    transformers=[
        (
            "num",
            Pipeline(
                steps=[
                    ("imputer", SimpleImputer(strategy="mean")),
                    ("scaler", StandardScaler()),
                ]
            ),
            num_cols,
        ),
        (
            "cat",
            Pipeline(
                steps=[
                    ("imputer", SimpleImputer(strategy="most_frequent")),
                    ("encoder", OneHotEncoder()),
                ]
            ),
            cat_cols,
        ),
    ]
)

# 3. Build and train the model
X = train.drop(["Id", "Class"], axis=1)
y = train["Class"]
X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, test_size=0.2, random_state=42
)
rf = Pipeline(
    steps=[
        ("preprocessor", preprocessor),
        ("classifier", RandomForestClassifier(n_estimators=100, random_state=42)),
    ]
)
rf.fit(X_train, y_train)

# 4. Evaluate the model
y_train_pred = rf.predict(X_train)
y_valid_pred = rf.predict(X_valid)
print("Training accuracy: ", accuracy_score(y_train, y_train_pred))
print("Validation accuracy: ", accuracy_score(y_valid, y_valid_pred))

# Make predictions on the test set
X_test = test.drop("Id", axis=1)
predictions = rf.predict_proba(X_test)

# Check if the number of predictions matches the number of rows in the test set
assert len(predictions) == len(
    test
), "Number of predictions must match number of rows in test set"

# Create the submission DataFrame: one probability column per class plus the Id
submission = pd.DataFrame(predictions, columns=["class_0", "class_1"])
submission.insert(0, "Id", test["Id"])

# Save the submission DataFrame to a CSV file
submission.to_csv("submission.csv", index=False)
false
0
734
4
734
734
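The random forest kernel above hard-codes the probability column names when it assembles its submission. The sketch below shows the safer pattern of deriving the column order from the fitted estimator's classes_ attribute; the toy features, the id_* identifiers, and the 100-row dataset are made up for illustration, and only scikit-learn, NumPy and pandas are assumed.

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(42)
X = pd.DataFrame(rng.normal(size=(100, 5)), columns=[f"f{i}" for i in range(5)])
y = rng.integers(0, 2, size=100)  # binary target, like the Class column

clf = RandomForestClassifier(n_estimators=50, random_state=42).fit(X, y)
proba = clf.predict_proba(X[:3])  # shape (3, n_classes)

# predict_proba columns follow clf.classes_, so build the header from it
submission = pd.DataFrame(proba, columns=[f"class_{c}" for c in clf.classes_])
submission.insert(0, "Id", [f"id_{i}" for i in range(3)])
print(submission)

Reading the header from clf.classes_ avoids silently swapped class_0/class_1 probabilities if the label encoding ever changes.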
129423402
import pandas as pd from statsmodels.graphics.tsaplots import plot_acf import matplotlib.pyplot as plt import numpy as np from statsmodels.tsa.holtwinters import ExponentialSmoothing import sys from statsmodels.tsa.seasonal import seasonal_decompose from sklearn.metrics import mean_squared_error, mean_absolute_error import warnings warnings.filterwarnings("ignore") train = pd.read_csv("/kaggle/input/fixed-data-new/train_fixed_new.csv") test = pd.read_csv("/kaggle/input/fixed-data-new/test_fixed_new.csv") # # 1. Data preparation # Tu ću sad pronaći mašinu koja ima seasonality koristeći plot_acf. # AI10158 # AI10635 # AL11466 # AL12144 # CI12166 # DL101579 # DS100760 # for machine in train.machine_name.unique(): machine_data = train[train["machine_name"] == machine]["total"] # Perform seasonal decomposition decomposition = seasonal_decompose( machine_data, model="additive", period=int(np.floor(len(machine_data) / 2)) ) # Access the seasonal component seasonal_component = decomposition.seasonal # Check if there is seasonality if abs(seasonal_component).mean() > 10000: # Adjust the threshold as needed print("Seasonality detected-" + machine) def getAnomalyLine(centil): eff_1 = [] anomalije = train.loc[(train["label"] == 1)] for index, row in anomalije.iterrows(): eff_1.append(row["broken"] / row["total"]) num_to_drop = int(len(eff_1) * centil) eff_1.sort() eff_1 = eff_1[num_to_drop:] return eff_1[0] def visualizeEff(machine_name, centil): machine_data = train.loc[ (train["machine_name"] == machine_name) & (train["day"] > 364) ] plt.figure(figsize=(20, 8)) anomalyLine = getAnomalyLine(centil) eff = [] dani_anomalija = [] anomalija_eff = [] line = [] dani = [] for index, row in machine_data.iterrows(): eff.append(row["broken"] / row["total"]) line.append(anomalyLine) dani.append(row["day"]) if row["label"] == 1: dani_anomalija.append(row["day"]) anomalija_eff.append(row["broken"] / row["total"]) plt.title(machine_name) plt.plot(dani, eff, "g-", label="Linija efektivnosti stroja po danima") plt.scatter( dani_anomalija, anomalija_eff, c="r", edgecolors="black", s=75, label="Anomalije", ) plt.plot(dani, line, "k--", label="Linija efikasnosti za dan centil") plt.legend(loc="best") plt.show() return def visualizeData(machine_name): machine_data = train.loc[ (train["machine_name"] == machine_name) & (train["day"] > 364) ] plt.figure(figsize=(20, 8)) total = [] broken = [] anomalija_day = [] anomalija_total = [] anomalija_broken = [] for index, row in machine_data.iterrows(): total.append(row["total"]) broken.append(row["broken"]) if row["label"] == 1: anomalija_total.append(row["total"]) anomalija_broken.append(row["broken"]) anomalija_day.append(row["day"]) plt.title(machine_name) plt.scatter( range(365, 365 + len(total)), np.log(total), c="cyan", edgecolors="black", label="Total", ) plt.scatter( range(365, 365 + len(broken)), np.log(broken), c="yellow", edgecolors="black", label="Broken", ) # plt.scatter(range(365,365+len(total)), total, c='cyan', edgecolors= "black", label='Total') # plt.scatter(range(365,365+len(broken)), np.log(broken), c='yellow',edgecolors= "black", label='Broken') plt.scatter( anomalija_day, np.log(anomalija_total), c="b", s=100, edgecolors="black", label="Total kod anomalije", ) plt.scatter( anomalija_day, np.log(anomalija_broken), c="r", s=100, edgecolors="black", label="Broken kod anomalije", ) plt.legend(loc="best") plt.show() return visualizeData("CI101712") visualizeEff("CI101712", 0.5) # # 2. 
Exponential smoothing machine_data = train[train["machine_name"] == "CI101712"]["total"] machine_data = machine_data.sample(frac=1, random_state=42) train_size = int(len(machine_data) * 0.7) train_data = machine_data[:train_size] test_data = machine_data[train_size:] train_values = train_data.values test_values = test_data.values model = ExponentialSmoothing(train_values).fit() pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) plt.figure(figsize=(20, 8)) plt.plot(range(1, len(train_values) + 1), train_values, label="train") plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), test_values, label="test", ) plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), pred, label="pred", ) plt.title("Exponential Smoothing") plt.legend() plt.show() # seasonal_periods=23 jer je 162 dana a gore se vid 7 perioda best_level = 0 best_slope = 0 best_seasonal = 0 best_error = sys.maxsize for smoothing_level in [0.1, 0.2, 0.4]: for smoothing_slope in [0.1, 0.2, 0.4]: for smoothing_seasonal in [0.1, 0.2, 0.4]: model = ExponentialSmoothing( train_values, seasonal="add", seasonal_periods=23 ).fit( smoothing_level=smoothing_level, smoothing_slope=smoothing_slope, smoothing_seasonal=smoothing_seasonal, ) pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) error = mean_absolute_error(test_values, pred) print("Parametri:") print("Soothing_level: " + str(smoothing_level), end="") print(", Soothing_slope: " + str(smoothing_slope), end="") print(", Soothing_seasonal: " + str(smoothing_seasonal)) print("Error: " + str(error)) print() if error < best_error: best_level = smoothing_level best_slope = smoothing_slope best_seasonal = smoothing_seasonal best_error = error print() print("Najbolji parametri:") print("Soothing_level: " + str(best_level)) print("Soothing_slope: " + str(best_slope)) print("Soothing_seasonal: " + str(best_seasonal)) model = ExponentialSmoothing(train_values, seasonal="add", seasonal_periods=23).fit( smoothing_level=best_level, smoothing_slope=best_slope, smoothing_seasonal=best_seasonal, ) pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) plt.figure(figsize=(20, 8)) plt.plot(range(1, len(train_values) + 1), train_values, label="train") plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), test_values, label="test", ) plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), pred, label="pred", ) plt.title( "s_level=" + str(best_level) + "s_slope=" + str(best_slope) + "s_seasonal=" + str(best_seasonal) ) plt.legend() plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423402.ipynb
null
null
[{"Id": 129423402, "ScriptId": 38468064, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9230157, "CreationDate": "05/13/2023 16:59:49", "VersionNumber": 1.0, "Title": "[MN <0036524183>] Time-series (TS)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 182.0, "LinesInsertedFromPrevious": 182.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd from statsmodels.graphics.tsaplots import plot_acf import matplotlib.pyplot as plt import numpy as np from statsmodels.tsa.holtwinters import ExponentialSmoothing import sys from statsmodels.tsa.seasonal import seasonal_decompose from sklearn.metrics import mean_squared_error, mean_absolute_error import warnings warnings.filterwarnings("ignore") train = pd.read_csv("/kaggle/input/fixed-data-new/train_fixed_new.csv") test = pd.read_csv("/kaggle/input/fixed-data-new/test_fixed_new.csv") # # 1. Data preparation # Tu ću sad pronaći mašinu koja ima seasonality koristeći plot_acf. # AI10158 # AI10635 # AL11466 # AL12144 # CI12166 # DL101579 # DS100760 # for machine in train.machine_name.unique(): machine_data = train[train["machine_name"] == machine]["total"] # Perform seasonal decomposition decomposition = seasonal_decompose( machine_data, model="additive", period=int(np.floor(len(machine_data) / 2)) ) # Access the seasonal component seasonal_component = decomposition.seasonal # Check if there is seasonality if abs(seasonal_component).mean() > 10000: # Adjust the threshold as needed print("Seasonality detected-" + machine) def getAnomalyLine(centil): eff_1 = [] anomalije = train.loc[(train["label"] == 1)] for index, row in anomalije.iterrows(): eff_1.append(row["broken"] / row["total"]) num_to_drop = int(len(eff_1) * centil) eff_1.sort() eff_1 = eff_1[num_to_drop:] return eff_1[0] def visualizeEff(machine_name, centil): machine_data = train.loc[ (train["machine_name"] == machine_name) & (train["day"] > 364) ] plt.figure(figsize=(20, 8)) anomalyLine = getAnomalyLine(centil) eff = [] dani_anomalija = [] anomalija_eff = [] line = [] dani = [] for index, row in machine_data.iterrows(): eff.append(row["broken"] / row["total"]) line.append(anomalyLine) dani.append(row["day"]) if row["label"] == 1: dani_anomalija.append(row["day"]) anomalija_eff.append(row["broken"] / row["total"]) plt.title(machine_name) plt.plot(dani, eff, "g-", label="Linija efektivnosti stroja po danima") plt.scatter( dani_anomalija, anomalija_eff, c="r", edgecolors="black", s=75, label="Anomalije", ) plt.plot(dani, line, "k--", label="Linija efikasnosti za dan centil") plt.legend(loc="best") plt.show() return def visualizeData(machine_name): machine_data = train.loc[ (train["machine_name"] == machine_name) & (train["day"] > 364) ] plt.figure(figsize=(20, 8)) total = [] broken = [] anomalija_day = [] anomalija_total = [] anomalija_broken = [] for index, row in machine_data.iterrows(): total.append(row["total"]) broken.append(row["broken"]) if row["label"] == 1: anomalija_total.append(row["total"]) anomalija_broken.append(row["broken"]) anomalija_day.append(row["day"]) plt.title(machine_name) plt.scatter( range(365, 365 + len(total)), np.log(total), c="cyan", edgecolors="black", label="Total", ) plt.scatter( range(365, 365 + len(broken)), np.log(broken), c="yellow", edgecolors="black", label="Broken", ) # plt.scatter(range(365,365+len(total)), total, c='cyan', edgecolors= "black", label='Total') # plt.scatter(range(365,365+len(broken)), np.log(broken), c='yellow',edgecolors= "black", label='Broken') plt.scatter( anomalija_day, np.log(anomalija_total), c="b", s=100, edgecolors="black", label="Total kod anomalije", ) plt.scatter( anomalija_day, np.log(anomalija_broken), c="r", s=100, edgecolors="black", label="Broken kod anomalije", ) plt.legend(loc="best") plt.show() return visualizeData("CI101712") visualizeEff("CI101712", 0.5) # # 2. 
Exponential smoothing machine_data = train[train["machine_name"] == "CI101712"]["total"] machine_data = machine_data.sample(frac=1, random_state=42) train_size = int(len(machine_data) * 0.7) train_data = machine_data[:train_size] test_data = machine_data[train_size:] train_values = train_data.values test_values = test_data.values model = ExponentialSmoothing(train_values).fit() pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) plt.figure(figsize=(20, 8)) plt.plot(range(1, len(train_values) + 1), train_values, label="train") plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), test_values, label="test", ) plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), pred, label="pred", ) plt.title("Exponential Smoothing") plt.legend() plt.show() # seasonal_periods=23 jer je 162 dana a gore se vid 7 perioda best_level = 0 best_slope = 0 best_seasonal = 0 best_error = sys.maxsize for smoothing_level in [0.1, 0.2, 0.4]: for smoothing_slope in [0.1, 0.2, 0.4]: for smoothing_seasonal in [0.1, 0.2, 0.4]: model = ExponentialSmoothing( train_values, seasonal="add", seasonal_periods=23 ).fit( smoothing_level=smoothing_level, smoothing_slope=smoothing_slope, smoothing_seasonal=smoothing_seasonal, ) pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) error = mean_absolute_error(test_values, pred) print("Parametri:") print("Soothing_level: " + str(smoothing_level), end="") print(", Soothing_slope: " + str(smoothing_slope), end="") print(", Soothing_seasonal: " + str(smoothing_seasonal)) print("Error: " + str(error)) print() if error < best_error: best_level = smoothing_level best_slope = smoothing_slope best_seasonal = smoothing_seasonal best_error = error print() print("Najbolji parametri:") print("Soothing_level: " + str(best_level)) print("Soothing_slope: " + str(best_slope)) print("Soothing_seasonal: " + str(best_seasonal)) model = ExponentialSmoothing(train_values, seasonal="add", seasonal_periods=23).fit( smoothing_level=best_level, smoothing_slope=best_slope, smoothing_seasonal=best_seasonal, ) pred = model.predict( start=len(train_values), end=len(train_values) + len(test_values) - 1 ) plt.figure(figsize=(20, 8)) plt.plot(range(1, len(train_values) + 1), train_values, label="train") plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), test_values, label="test", ) plt.plot( range(len(train_values) + 1, len(train_values) + len(test_values) + 1), pred, label="pred", ) plt.title( "s_level=" + str(best_level) + "s_slope=" + str(best_slope) + "s_seasonal=" + str(best_seasonal) ) plt.legend() plt.show()
false
0
2,367
0
2,367
2,367
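The time-series kernel above grid-searches Holt-Winters smoothing parameters for one machine's counts. Below is a compact sketch of that search on a synthetic seasonal series so it runs without the Kaggle files; the seasonal period of 23, the parameter grid, and the roughly 70/30 split mirror the notebook, while the generated data and variable names are assumptions.

import numpy as np
from sklearn.metrics import mean_absolute_error
from statsmodels.tsa.holtwinters import ExponentialSmoothing

rng = np.random.default_rng(0)
t = np.arange(200)
series = 100 + 10 * np.sin(2 * np.pi * t / 23) + rng.normal(0, 2, size=200)
train_values, test_values = series[:160], series[160:]

best_params, best_err = None, np.inf
for level in (0.1, 0.2, 0.4):
    for seasonal in (0.1, 0.2, 0.4):
        model = ExponentialSmoothing(
            train_values, seasonal="add", seasonal_periods=23
        ).fit(smoothing_level=level, smoothing_seasonal=seasonal)
        pred = model.forecast(len(test_values))
        err = mean_absolute_error(test_values, pred)
        if err < best_err:
            best_params, best_err = (level, seasonal), err

print("best (level, seasonal):", best_params, "MAE:", round(best_err, 3))

One design note: the sketch keeps the chronological split, since shuffling a series before splitting, as the original does with sample(frac=1), leaks future observations into the training window.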
129423015
# ## Hello everyone! 😺 # **Today we will build and train our own GAN model using the Tensorflow framework and its Keras wrapper. Watch carefully it will be entertaining!** # # Libraries import os import numpy as np import matplotlib.pyplot as plt import time import tensorflow as tf from tensorflow.keras.datasets import mnist import tensorflow.keras.backend as K from tensorflow.keras import layers from tensorflow import keras # # Prepare data (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train[y_train == 8] y_train = y_train[y_train == 8] BUFFER_SIZE = x_train.shape[0] BATCH_SIZE = 100 BUFFER_SIZE = BUFFER_SIZE // BATCH_SIZE * BATCH_SIZE x_train = x_train[:BUFFER_SIZE] y_train = y_train[:BUFFER_SIZE] print(x_train.shape, y_train.shape) x_train = x_train / 255 y_train = y_train / 255 dataset = ( tf.data.Dataset.from_tensor_slices(x_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) ) # # Our model | GAN # ![](https://i.ibb.co/cYk9Bf2/2023-05-12-13-48-56.png) class Generator(tf.keras.Model): def __init__( self, noise_size: int = 2, width: int = 256, height: int = 7, depth: int = 2 ): super().__init__() self.noise_input = keras.Input(shape=(noise_size,)) self.stem = keras.Sequential( [ layers.Dense( units=height * height * width, input_shape=(noise_size,), activation="relu", use_bias=False, ), layers.BatchNormalization(), layers.Reshape((height, height, width)), ], name="stem-network", ) self.bottleneck = keras.Sequential( [ layers.Conv2DTranspose( filters=width // 2, kernel_size=4, strides=1, padding="same", use_bias=False, ), layers.BatchNormalization(), layers.ReLU(), ], name="bottleneck", ) self.body = keras.Sequential( [*[self.deconv_block(width, i) for i in range(2, depth * 2 + 1, 2)]], name="body", ) self.head = keras.Sequential( [ layers.Dropout(0.30), layers.Conv2DTranspose( filters=1, kernel_size=4, strides=1, padding="same", activation="sigmoid", use_bias=False, ), ], name="head", ) @staticmethod def deconv_block(width: int, i: int, name: str = None) -> keras.Sequential: if not name: name = f"deconv-block-{i//2}" return keras.Sequential( [ layers.Conv2DTranspose( filters=width // i, kernel_size=4, strides=2, padding="same", use_bias=False, ), layers.BatchNormalization(), layers.ReLU(), ], name=name, ) def call(self, inputs, training=False): x = self.stem(inputs) x = self.bottleneck(x) x = self.body(x) x = self.head(x) return x gan = Generator() gan.build((None, 2)) gan.summary() class Discriminator: def __init__(): pass def call(): pass class GAN: def __init__(): pass def call(): pass
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/423/129423015.ipynb
null
null
[{"Id": 129423015, "ScriptId": 38382573, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13000811, "CreationDate": "05/13/2023 16:55:32", "VersionNumber": 3.0, "Title": "GAN with Tensorflow&Keras \ud83c\udf4e", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 66.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 51.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## Hello everyone! 😺 # **Today we will build and train our own GAN model using the Tensorflow framework and its Keras wrapper. Watch carefully it will be entertaining!** # # Libraries import os import numpy as np import matplotlib.pyplot as plt import time import tensorflow as tf from tensorflow.keras.datasets import mnist import tensorflow.keras.backend as K from tensorflow.keras import layers from tensorflow import keras # # Prepare data (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train[y_train == 8] y_train = y_train[y_train == 8] BUFFER_SIZE = x_train.shape[0] BATCH_SIZE = 100 BUFFER_SIZE = BUFFER_SIZE // BATCH_SIZE * BATCH_SIZE x_train = x_train[:BUFFER_SIZE] y_train = y_train[:BUFFER_SIZE] print(x_train.shape, y_train.shape) x_train = x_train / 255 y_train = y_train / 255 dataset = ( tf.data.Dataset.from_tensor_slices(x_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) ) # # Our model | GAN # ![](https://i.ibb.co/cYk9Bf2/2023-05-12-13-48-56.png) class Generator(tf.keras.Model): def __init__( self, noise_size: int = 2, width: int = 256, height: int = 7, depth: int = 2 ): super().__init__() self.noise_input = keras.Input(shape=(noise_size,)) self.stem = keras.Sequential( [ layers.Dense( units=height * height * width, input_shape=(noise_size,), activation="relu", use_bias=False, ), layers.BatchNormalization(), layers.Reshape((height, height, width)), ], name="stem-network", ) self.bottleneck = keras.Sequential( [ layers.Conv2DTranspose( filters=width // 2, kernel_size=4, strides=1, padding="same", use_bias=False, ), layers.BatchNormalization(), layers.ReLU(), ], name="bottleneck", ) self.body = keras.Sequential( [*[self.deconv_block(width, i) for i in range(2, depth * 2 + 1, 2)]], name="body", ) self.head = keras.Sequential( [ layers.Dropout(0.30), layers.Conv2DTranspose( filters=1, kernel_size=4, strides=1, padding="same", activation="sigmoid", use_bias=False, ), ], name="head", ) @staticmethod def deconv_block(width: int, i: int, name: str = None) -> keras.Sequential: if not name: name = f"deconv-block-{i//2}" return keras.Sequential( [ layers.Conv2DTranspose( filters=width // i, kernel_size=4, strides=2, padding="same", use_bias=False, ), layers.BatchNormalization(), layers.ReLU(), ], name=name, ) def call(self, inputs, training=False): x = self.stem(inputs) x = self.bottleneck(x) x = self.body(x) x = self.head(x) return x gan = Generator() gan.build((None, 2)) gan.summary() class Discriminator: def __init__(): pass def call(): pass class GAN: def __init__(): pass def call(): pass
false
0
934
0
934
934
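The GAN kernel above defines a working Generator but leaves the Discriminator and GAN classes as empty stubs. The following is one plausible, hedged sketch of a discriminator whose input shape matches the 28x28x1 images the Generator emits; the layer widths, dropout rate, and LeakyReLU slope are arbitrary choices rather than the author's design, and only TensorFlow/Keras is assumed.

import tensorflow as tf
from tensorflow.keras import layers, models

def build_discriminator(input_shape=(28, 28, 1)) -> tf.keras.Model:
    # Strided convolutions downsample 28 -> 14 -> 7 before the sigmoid head
    return models.Sequential(
        [
            layers.Conv2D(64, 4, strides=2, padding="same", input_shape=input_shape),
            layers.LeakyReLU(0.2),
            layers.Dropout(0.3),
            layers.Conv2D(128, 4, strides=2, padding="same"),
            layers.LeakyReLU(0.2),
            layers.Dropout(0.3),
            layers.Flatten(),
            layers.Dense(1, activation="sigmoid"),  # real/fake probability
        ],
        name="discriminator",
    )

if __name__ == "__main__":
    disc = build_discriminator()
    disc.summary()
    print(disc(tf.random.uniform((2, 28, 28, 1))).shape)  # (2, 1)

A full GAN class would alternate updates between this discriminator and the Generator above, but that training loop is left out here since the original kernel has not defined it yet.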
129879523
<jupyter_start><jupyter_text>google_share_price_data Kaggle dataset identifier: google-share-price-data <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/google-share-price-data/GOOGL.csv") import statsmodels df.head() google_df = df.loc[:, ["Date", "Close"]] google_df from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(google_df["Close"]) ADF_result ADF_result print("ADF Stats", ADF_result[0]) print("P-value", ADF_result[1]) from statsmodels.graphics.tsaplots import plot_acf plot_acf(google_df["Close"], lags=20) # ### Doing first order difference diff_google_df = np.diff(google_df["Close"], n=1) diff_google_ADF_result = adfuller(diff_google_df) diff_google_ADF_result print("ADF Stats", diff_google_ADF_result[0]) print("P-value", diff_google_ADF_result[1]) from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_google_df, lags=20) # # Naive Algos len(google_df) train = google_df.iloc[:202] test = google_df.iloc[202:] len(train) + len(test) def last_record(train, test): test_df = test.copy() test_df["Close"] = train.tail(1)["Close"].unique()[0] return test_df last_record_df = last_record(train, test) def arithmetic_mean(train, test): train_mean = train["Close"].mean() test_df = test.copy() test_df["Close"] = train_mean return test_df arithmetic_mean_df = arithmetic_mean(train, test) def last_month(train, test): train_mean = train.loc[len(train) - 30 :, ["Close"]].mean().values[0] test_df = test.copy() test_df["Close"] = train_mean return test_df last_month_df = last_month(train, test) from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error import math last_month_mape = mean_absolute_percentage_error(test["Close"], last_month_df["Close"]) last_month_mse = mean_squared_error(test["Close"], last_month_df["Close"]) last_month_rmse = math.sqrt(mean_squared_error(test["Close"], last_month_df["Close"])) last_record_mape = mean_absolute_percentage_error( test["Close"], last_record_df["Close"] ) last_record_mse = mean_squared_error(test["Close"], last_record_df["Close"]) last_record_rmse = math.sqrt(mean_squared_error(test["Close"], last_record_df["Close"])) arithmetic_mean_mape = mean_absolute_percentage_error( test["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_mse = mean_squared_error(test["Close"], arithmetic_mean_df["Close"]) arithmetic_mean_rmse = math.sqrt( mean_squared_error(test["Close"], arithmetic_mean_df["Close"]) ) mape = { "last_month": last_month_mape, "last_record": last_record_mape, "arithmetic_mean": arithmetic_mean_mape, } mse = { "last_month": last_month_mse, "last_record": last_record_mse, "arithmetic_mean": arithmetic_mean_mse, } rmse = { "last_month": last_month_rmse, "last_record": last_record_rmse, "arithmetic_mean": arithmetic_mean_rmse, } print("MAPE:-", mape, "\n") print("MSE:-", mse, "\n") print("RMSE:-", rmse, "\n") def 
plot_bar_graph(plot_dict, ylabel): import matplotlib.pyplot as plt import numpy as np # Data for the bar graph x = plot_dict.keys() y = plot_dict.values() # Create the bar graph plt.bar(x, y) # Set the labels and title plt.xlabel("BaseLine") plt.ylabel(ylabel.upper()) plt.title("Bar Graph") # Show the plot plt.show() plot_bar_graph(mape, "mape") plot_bar_graph(mse, "mse") plot_bar_graph(rmse, "rmse") def shif_by_one_day(train, test): test_df = test.copy() test_df["Close"] = test_df["Close"].shift(1) test_df.loc[test_df["Close"].index[0], "Close"] = train.loc[ train["Close"].index[-1], "Close" ] return test_df shif_by_one_day_df = shif_by_one_day(train, test) last_month_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], last_month_df["Close"] ) last_month_mse = mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"]) last_month_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"]) ) last_record_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], last_record_df["Close"] ) last_record_mse = mean_squared_error( shif_by_one_day_df["Close"], last_record_df["Close"] ) last_record_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], last_record_df["Close"]) ) arithmetic_mean_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_mse = mean_squared_error( shif_by_one_day_df["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]) ) mape = { "last_month": last_month_mape, "last_record": last_record_mape, "arithmetic_mean": arithmetic_mean_mape, } mse = { "last_month": last_month_mse, "last_record": last_record_mse, "arithmetic_mean": arithmetic_mean_mse, } rmse = { "last_month": last_month_rmse, "last_record": last_record_rmse, "arithmetic_mean": arithmetic_mean_rmse, } print("MAPE:-", mape, "\n") print("MSE:-", mse, "\n") print("RMSE:-", rmse, "\n") def plot_bar_graph(plot_dict, ylabel): import matplotlib.pyplot as plt import numpy as np # Data for the bar graph x = plot_dict.keys() y = plot_dict.values() # Create the bar graph plt.bar(x, y) # Set the labels and title plt.xlabel("BaseLine") plt.ylabel(ylabel.upper()) plt.title("Bar Graph") # Show the plot plt.show() plot_bar_graph(mape, "mape") plot_bar_graph(mse, "mse") plot_bar_graph(rmse, "rmse")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/879/129879523.ipynb
google-share-price-data
zidanesunesara
[{"Id": 129879523, "ScriptId": 38591595, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4829488, "CreationDate": "05/17/2023 06:40:15", "VersionNumber": 2.0, "Title": "randoom_walk", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 185.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186284491, "KernelVersionId": 129879523, "SourceDatasetVersionId": 5697376}]
[{"Id": 5697376, "DatasetId": 3275919, "DatasourceVersionId": 5773019, "CreatorUserId": 4829488, "LicenseName": "Unknown", "CreationDate": "05/16/2023 09:47:05", "VersionNumber": 1.0, "Title": "google_share_price_data", "Slug": "google-share-price-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3275919, "CreatorUserId": 4829488, "OwnerUserId": 4829488.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5697376.0, "CurrentDatasourceVersionId": 5773019.0, "ForumId": 3341596, "Type": 2, "CreationDate": "05/16/2023 09:47:05", "LastActivityDate": "05/16/2023", "TotalViews": 76, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 3}]
[{"Id": 4829488, "UserName": "zidanesunesara", "DisplayName": "Zidane Sunesara", "RegisterDate": "04/07/2020", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/google-share-price-data/GOOGL.csv") import statsmodels df.head() google_df = df.loc[:, ["Date", "Close"]] google_df from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(google_df["Close"]) ADF_result ADF_result print("ADF Stats", ADF_result[0]) print("P-value", ADF_result[1]) from statsmodels.graphics.tsaplots import plot_acf plot_acf(google_df["Close"], lags=20) # ### Doing first order difference diff_google_df = np.diff(google_df["Close"], n=1) diff_google_ADF_result = adfuller(diff_google_df) diff_google_ADF_result print("ADF Stats", diff_google_ADF_result[0]) print("P-value", diff_google_ADF_result[1]) from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_google_df, lags=20) # # Naive Algos len(google_df) train = google_df.iloc[:202] test = google_df.iloc[202:] len(train) + len(test) def last_record(train, test): test_df = test.copy() test_df["Close"] = train.tail(1)["Close"].unique()[0] return test_df last_record_df = last_record(train, test) def arithmetic_mean(train, test): train_mean = train["Close"].mean() test_df = test.copy() test_df["Close"] = train_mean return test_df arithmetic_mean_df = arithmetic_mean(train, test) def last_month(train, test): train_mean = train.loc[len(train) - 30 :, ["Close"]].mean().values[0] test_df = test.copy() test_df["Close"] = train_mean return test_df last_month_df = last_month(train, test) from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error import math last_month_mape = mean_absolute_percentage_error(test["Close"], last_month_df["Close"]) last_month_mse = mean_squared_error(test["Close"], last_month_df["Close"]) last_month_rmse = math.sqrt(mean_squared_error(test["Close"], last_month_df["Close"])) last_record_mape = mean_absolute_percentage_error( test["Close"], last_record_df["Close"] ) last_record_mse = mean_squared_error(test["Close"], last_record_df["Close"]) last_record_rmse = math.sqrt(mean_squared_error(test["Close"], last_record_df["Close"])) arithmetic_mean_mape = mean_absolute_percentage_error( test["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_mse = mean_squared_error(test["Close"], arithmetic_mean_df["Close"]) arithmetic_mean_rmse = math.sqrt( mean_squared_error(test["Close"], arithmetic_mean_df["Close"]) ) mape = { "last_month": last_month_mape, "last_record": last_record_mape, "arithmetic_mean": arithmetic_mean_mape, } mse = { "last_month": last_month_mse, "last_record": last_record_mse, "arithmetic_mean": arithmetic_mean_mse, } rmse = { "last_month": last_month_rmse, "last_record": last_record_rmse, "arithmetic_mean": arithmetic_mean_rmse, } print("MAPE:-", mape, "\n") print("MSE:-", mse, "\n") print("RMSE:-", rmse, "\n") def plot_bar_graph(plot_dict, ylabel): import matplotlib.pyplot as plt import numpy as np # Data for the bar graph x = plot_dict.keys() y = 
plot_dict.values() # Create the bar graph plt.bar(x, y) # Set the labels and title plt.xlabel("BaseLine") plt.ylabel(ylabel.upper()) plt.title("Bar Graph") # Show the plot plt.show() plot_bar_graph(mape, "mape") plot_bar_graph(mse, "mse") plot_bar_graph(rmse, "rmse") def shif_by_one_day(train, test): test_df = test.copy() test_df["Close"] = test_df["Close"].shift(1) test_df.loc[test_df["Close"].index[0], "Close"] = train.loc[ train["Close"].index[-1], "Close" ] return test_df shif_by_one_day_df = shif_by_one_day(train, test) last_month_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], last_month_df["Close"] ) last_month_mse = mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"]) last_month_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], last_month_df["Close"]) ) last_record_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], last_record_df["Close"] ) last_record_mse = mean_squared_error( shif_by_one_day_df["Close"], last_record_df["Close"] ) last_record_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], last_record_df["Close"]) ) arithmetic_mean_mape = mean_absolute_percentage_error( shif_by_one_day_df["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_mse = mean_squared_error( shif_by_one_day_df["Close"], arithmetic_mean_df["Close"] ) arithmetic_mean_rmse = math.sqrt( mean_squared_error(shif_by_one_day_df["Close"], arithmetic_mean_df["Close"]) ) mape = { "last_month": last_month_mape, "last_record": last_record_mape, "arithmetic_mean": arithmetic_mean_mape, } mse = { "last_month": last_month_mse, "last_record": last_record_mse, "arithmetic_mean": arithmetic_mean_mse, } rmse = { "last_month": last_month_rmse, "last_record": last_record_rmse, "arithmetic_mean": arithmetic_mean_rmse, } print("MAPE:-", mape, "\n") print("MSE:-", mse, "\n") print("RMSE:-", rmse, "\n") def plot_bar_graph(plot_dict, ylabel): import matplotlib.pyplot as plt import numpy as np # Data for the bar graph x = plot_dict.keys() y = plot_dict.values() # Create the bar graph plt.bar(x, y) # Set the labels and title plt.xlabel("BaseLine") plt.ylabel(ylabel.upper()) plt.title("Bar Graph") # Show the plot plt.show() plot_bar_graph(mape, "mape") plot_bar_graph(mse, "mse") plot_bar_graph(rmse, "rmse")
false
1
2,120
0
2,148
2,120
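The baseline notebook above compares naive forecasts (last value, training mean, last month) for GOOGL closing prices. Here is a minimal sketch of the same comparison on a synthetic random walk so it runs without the share-price CSV; the seed, series length, and 202-row training split are illustrative, and scikit-learn's MAPE metric is assumed to be available (version 0.24 or later).

import math
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_percentage_error, mean_squared_error

rng = np.random.default_rng(7)
prices = pd.Series(100 + np.cumsum(rng.normal(0, 1, size=252)), name="Close")
train, test = prices.iloc[:202], prices.iloc[202:]

forecasts = {
    "last_record": np.full(len(test), train.iloc[-1]),
    "arithmetic_mean": np.full(len(test), train.mean()),
}
for name, pred in forecasts.items():
    mape = mean_absolute_percentage_error(test, pred)
    rmse = math.sqrt(mean_squared_error(test, pred))
    print(f"{name}: MAPE={mape:.4f} RMSE={rmse:.4f}")

For a random walk the last observed value is usually the hardest naive baseline to beat, which is the point of the comparison in the notebook.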
129879123
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # library import os from datetime import datetime import time from sklearn.preprocessing import StandardScaler import plotly.express as px import plotly.graph_objs as go import matplotlib.pyplot as plt import seaborn as sns import math import statsmodels.api as sm from statsmodels.tsa.ar_model import AutoReg from statsmodels.tsa.arima_model import ARMA from statsmodels.tsa.stattools import adfuller from statsmodels.graphics.tsaplots import plot_acf from statsmodels.graphics.tsaplots import plot_pacf from statsmodels.tsa.arima_model import ARIMA from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.seasonal import seasonal_decompose from scipy import stats from itertools import product import warnings warnings.filterwarnings("ignore") import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) info = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv" ) ctrain = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/monthly_adjusted_IBM.csv" ) intraday = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/intraday_5min_IBM.csv" ) daily = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv" ) sns.color_palette("YlOrRd", 10) info.head info.columns import plotly.express as px # Assuming you have a DataFrame called 'info_s' with the required columns # ['timestamp', 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', # 'dividend_amount', 'split_coefficient'] # Replace 'Weight' with the correct column name from your DataFrame x_column = "timestamp" y_column = "volume" fig_index = px.bar( info_s, x=x_column, y=y_column, color=y_column, title="Popular Cryptocurrency Weight Distribution", color_continuous_scale=px.colors.sequential.YlOrRd, ) fig_index.show() ctrain.columns import pandas as pd import requests api_key = "V6M4VISDAY5MIIHY" # Replace with your actual API key symbol = "AAPL" # Replace with the stock symbol you want to retrieve data for start_date = "2021-01-01" # Start date of the data range end_date = "2021-09-21" # End date of the data range url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&outputsize=full&apikey={api_key}" response = requests.get(url) data = response.json() # Convert the JSON response to a DataFrame df = pd.DataFrame(data["Time Series (Daily)"]).T df.index = pd.to_datetime(df.index) df = df.loc[start_date:end_date] # Perform further data processing or analysis as needed import time from datetime import datetime volume_column = "volume" asset_id_column = "timestamp" asset_name_column = "high" # Update the column name here totimestamp = lambda s: np.int32( 
time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple()) ) def log_return(series, periods=1): return np.log(series).diff(periods=periods) all2021 = pd.DataFrame([]) for asset_id, asset_name in zip(info[asset_id_column], info[asset_name_column]): asset = ctrain[ctrain[volume_column] == asset_id].set_index("timestamp") if asset.empty: continue asset = asset.reindex(range(asset.index[0], asset.index[-1] + 60, 60), method="pad") lret = log_return(asset["close"].fillna(0))[1:] all2021 = all2021.join(lret, rsuffix=asset_name, how="outer") plt.figure(figsize=(15, 10)) # Adjust the figure size as needed plt.imshow(all2021.corr()) plt.yticks( range(len(info[volume_column])), info[asset_name_column].values, fontsize=8 ) # Increase font size plt.xticks( range(len(info[volume_column])), info[asset_name_column].values, rotation="vertical", fontsize=8, ) # Increase font size plt.colorbar(cmap="coolwarm") plt.show() daily.columns # Create time interval for 2021 def dur(start, end, data): df = data.loc[totimestamp(start) : totimestamp(end)] return df info2021 = dur(start="01/01/2021", end="21/09/2021", data=info) ctrain2021 = dur(start="01/01/2021", end="21/09/2021", data=daily) daily = dur(start="01/01/2021", end="21/09/2021", data=ctrain) import requests import json api_key = "V6M4VISDAY5MIIHY" symbol = "BTCUSD" # Replace with the desired cryptocurrency symbol (e.g., BTCUSD, ETHUSD, etc.) interval = "5min" url = f"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY_EXTENDED&symbol={symbol}&market=USD&interval={interval}&apikey={api_key}" response = requests.get(url) import requests import json url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo" response = requests.get(url) data = response.json() time_series_key = "Time Series (Daily)" if time_series_key not in data: print("Unable to find time series data in the API response.") else: time_series = data[time_series_key] prices = [float(entry["4. close"]) for entry in time_series.values()] highest_price = max(prices) lowest_price = min(prices) print("Highest price:", highest_price) print("Lowest price:", lowest_price) import requests import json import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter from datetime import datetime url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo" response = requests.get(url) data = response.json() time_series_key = "Time Series (Daily)" if time_series_key not in data: print("Unable to find time series data in the API response.") else: time_series = data[time_series_key] # Extract data points and dates within the desired date range dates = [] high_values = [] low_values = [] today = datetime.now().date() year_2020 = datetime(2020, 1, 1).date() for date, entry in time_series.items(): date_obj = datetime.strptime(date, "%Y-%m-%d").date() if year_2020 <= date_obj <= today: dates.append(date) high_values.append(float(entry["2. high"])) low_values.append(float(entry["3. 
low"])) # Reverse the lists to get the oldest dates first dates = dates[::-1] high_values = high_values[::-1] low_values = low_values[::-1] # Plotting the graph plt.figure(figsize=(12, 6)) plt.plot(dates, high_values, label="High") plt.plot(dates, low_values, label="Low") plt.xlabel("Date") plt.ylabel("Price") plt.title("High and Low Prices of IBM Stock") plt.xticks(rotation=45) # Specify the date interval for x-axis ticks ax = plt.gca() date_format = DateFormatter("%Y-%m-%d") ax.xaxis.set_major_locator(plt.MaxNLocator(10)) ax.xaxis.set_major_formatter(date_format) plt.legend() plt.tight_layout() plt.show() # Define the symbols for the stocks symbols = ["IBM", "TSLA", "MSFT"] # Initialize the data dictionary to store the stock data for each symbol data = {} # Fetch the data for each symbol for symbol in symbols: url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&apikey=demo" response = requests.get(url) data[symbol] = response.json() # Plotting the graph fig, ax1 = plt.subplots(figsize=(12, 6)) # Iterate over each symbol and plot the high, low, close prices, and volumes for symbol, stock_data in data.items(): time_series_key = "Time Series (Daily)" if time_series_key not in stock_data: print( f"Unable to find time series data for symbol {symbol} in the API response." ) else: time_series = stock_data[time_series_key] # Extract data points and dates within the desired date range dates = [] high_values = [] low_values = [] close_values = [] volumes = [] for date, entry in time_series.items(): date_obj = datetime.strptime(date, "%Y-%m-%d").date() dates.append(date_obj) high_values.append(float(entry["2. high"])) low_values.append(float(entry["3. low"])) close_values.append(float(entry["4. close"])) volumes.append(float(entry["6. volume"])) # Reverse the lists to get the oldest dates first dates = dates[::-1] high_values = high_values[::-1] low_values = low_values[::-1] close_values = close_values[::-1] volumes = volumes[::-1] # Plot the high, low, close prices on the left y-axis ax1.plot( dates, high_values, label=f"{symbol} High", color="red", linestyle="--" ) ax1.plot( dates, low_values, label=f"{symbol} Low", color="green", linestyle="--" ) ax1.plot(dates, close_values, label=f"{symbol} Close", color="blue") # Plot the volume on the right y-axis ax2 = ax1.twinx() ax2.plot(dates, volumes, label=f"{symbol} Volume", color="orange") # Set the axis labels, title, and legends ax1.set_xlabel("Date") ax1.set_ylabel("Price") ax1.set_title("Stock Prices") ax1.legend(loc="upper left") ax1.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d")) ax2.set_ylabel("Volume") ax2.legend(loc="upper right") plt.xticks(rotation=45) plt.tight_layout() plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/879/129879123.ipynb
null
null
[{"Id": 129879123, "ScriptId": 38594640, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7088409, "CreationDate": "05/17/2023 06:36:38", "VersionNumber": 1.0, "Title": "notebookd0d90ef890", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 305.0, "LinesInsertedFromPrevious": 305.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # library import os from datetime import datetime import time from sklearn.preprocessing import StandardScaler import plotly.express as px import plotly.graph_objs as go import matplotlib.pyplot as plt import seaborn as sns import math import statsmodels.api as sm from statsmodels.tsa.ar_model import AutoReg from statsmodels.tsa.arima_model import ARMA from statsmodels.tsa.stattools import adfuller from statsmodels.graphics.tsaplots import plot_acf from statsmodels.graphics.tsaplots import plot_pacf from statsmodels.tsa.arima_model import ARIMA from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.seasonal import seasonal_decompose from scipy import stats from itertools import product import warnings warnings.filterwarnings("ignore") import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) info = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv" ) ctrain = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/monthly_adjusted_IBM.csv" ) intraday = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/intraday_5min_IBM.csv" ) daily = pd.read_csv( "/kaggle/input/crypto-stock-data/Crypto Stock Data/daily_adjusted_IBM.csv" ) sns.color_palette("YlOrRd", 10) info.head info.columns import plotly.express as px # Assuming you have a DataFrame called 'info_s' with the required columns # ['timestamp', 'open', 'high', 'low', 'close', 'adjusted_close', 'volume', # 'dividend_amount', 'split_coefficient'] # Replace 'Weight' with the correct column name from your DataFrame x_column = "timestamp" y_column = "volume" fig_index = px.bar( info_s, x=x_column, y=y_column, color=y_column, title="Popular Cryptocurrency Weight Distribution", color_continuous_scale=px.colors.sequential.YlOrRd, ) fig_index.show() ctrain.columns import pandas as pd import requests api_key = "V6M4VISDAY5MIIHY" # Replace with your actual API key symbol = "AAPL" # Replace with the stock symbol you want to retrieve data for start_date = "2021-01-01" # Start date of the data range end_date = "2021-09-21" # End date of the data range url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&outputsize=full&apikey={api_key}" response = requests.get(url) data = response.json() # Convert the JSON response to a DataFrame df = pd.DataFrame(data["Time Series (Daily)"]).T df.index = pd.to_datetime(df.index) df = df.loc[start_date:end_date] # Perform further data processing or analysis as needed import time from datetime import datetime volume_column = "volume" asset_id_column = "timestamp" asset_name_column = "high" # Update the column name here totimestamp = lambda s: np.int32( 
time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple()) ) def log_return(series, periods=1): return np.log(series).diff(periods=periods) all2021 = pd.DataFrame([]) for asset_id, asset_name in zip(info[asset_id_column], info[asset_name_column]): asset = ctrain[ctrain[volume_column] == asset_id].set_index("timestamp") if asset.empty: continue asset = asset.reindex(range(asset.index[0], asset.index[-1] + 60, 60), method="pad") lret = log_return(asset["close"].fillna(0))[1:] all2021 = all2021.join(lret, rsuffix=asset_name, how="outer") plt.figure(figsize=(15, 10)) # Adjust the figure size as needed plt.imshow(all2021.corr()) plt.yticks( range(len(info[volume_column])), info[asset_name_column].values, fontsize=8 ) # Increase font size plt.xticks( range(len(info[volume_column])), info[asset_name_column].values, rotation="vertical", fontsize=8, ) # Increase font size plt.colorbar(cmap="coolwarm") plt.show() daily.columns # Create time interval for 2021 def dur(start, end, data): df = data.loc[totimestamp(start) : totimestamp(end)] return df info2021 = dur(start="01/01/2021", end="21/09/2021", data=info) ctrain2021 = dur(start="01/01/2021", end="21/09/2021", data=daily) daily = dur(start="01/01/2021", end="21/09/2021", data=ctrain) import requests import json api_key = "V6M4VISDAY5MIIHY" symbol = "BTCUSD" # Replace with the desired cryptocurrency symbol (e.g., BTCUSD, ETHUSD, etc.) interval = "5min" url = f"https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY_EXTENDED&symbol={symbol}&market=USD&interval={interval}&apikey={api_key}" response = requests.get(url) import requests import json url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo" response = requests.get(url) data = response.json() time_series_key = "Time Series (Daily)" if time_series_key not in data: print("Unable to find time series data in the API response.") else: time_series = data[time_series_key] prices = [float(entry["4. close"]) for entry in time_series.values()] highest_price = max(prices) lowest_price = min(prices) print("Highest price:", highest_price) print("Lowest price:", lowest_price) import requests import json import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter from datetime import datetime url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=IBM&apikey=demo" response = requests.get(url) data = response.json() time_series_key = "Time Series (Daily)" if time_series_key not in data: print("Unable to find time series data in the API response.") else: time_series = data[time_series_key] # Extract data points and dates within the desired date range dates = [] high_values = [] low_values = [] today = datetime.now().date() year_2020 = datetime(2020, 1, 1).date() for date, entry in time_series.items(): date_obj = datetime.strptime(date, "%Y-%m-%d").date() if year_2020 <= date_obj <= today: dates.append(date) high_values.append(float(entry["2. high"])) low_values.append(float(entry["3. 
low"])) # Reverse the lists to get the oldest dates first dates = dates[::-1] high_values = high_values[::-1] low_values = low_values[::-1] # Plotting the graph plt.figure(figsize=(12, 6)) plt.plot(dates, high_values, label="High") plt.plot(dates, low_values, label="Low") plt.xlabel("Date") plt.ylabel("Price") plt.title("High and Low Prices of IBM Stock") plt.xticks(rotation=45) # Specify the date interval for x-axis ticks ax = plt.gca() date_format = DateFormatter("%Y-%m-%d") ax.xaxis.set_major_locator(plt.MaxNLocator(10)) ax.xaxis.set_major_formatter(date_format) plt.legend() plt.tight_layout() plt.show() # Define the symbols for the stocks symbols = ["IBM", "TSLA", "MSFT"] # Initialize the data dictionary to store the stock data for each symbol data = {} # Fetch the data for each symbol for symbol in symbols: url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={symbol}&apikey=demo" response = requests.get(url) data[symbol] = response.json() # Plotting the graph fig, ax1 = plt.subplots(figsize=(12, 6)) # Iterate over each symbol and plot the high, low, close prices, and volumes for symbol, stock_data in data.items(): time_series_key = "Time Series (Daily)" if time_series_key not in stock_data: print( f"Unable to find time series data for symbol {symbol} in the API response." ) else: time_series = stock_data[time_series_key] # Extract data points and dates within the desired date range dates = [] high_values = [] low_values = [] close_values = [] volumes = [] for date, entry in time_series.items(): date_obj = datetime.strptime(date, "%Y-%m-%d").date() dates.append(date_obj) high_values.append(float(entry["2. high"])) low_values.append(float(entry["3. low"])) close_values.append(float(entry["4. close"])) volumes.append(float(entry["6. volume"])) # Reverse the lists to get the oldest dates first dates = dates[::-1] high_values = high_values[::-1] low_values = low_values[::-1] close_values = close_values[::-1] volumes = volumes[::-1] # Plot the high, low, close prices on the left y-axis ax1.plot( dates, high_values, label=f"{symbol} High", color="red", linestyle="--" ) ax1.plot( dates, low_values, label=f"{symbol} Low", color="green", linestyle="--" ) ax1.plot(dates, close_values, label=f"{symbol} Close", color="blue") # Plot the volume on the right y-axis ax2 = ax1.twinx() ax2.plot(dates, volumes, label=f"{symbol} Volume", color="orange") # Set the axis labels, title, and legends ax1.set_xlabel("Date") ax1.set_ylabel("Price") ax1.set_title("Stock Prices") ax1.legend(loc="upper left") ax1.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d")) ax2.set_ylabel("Volume") ax2.legend(loc="upper right") plt.xticks(rotation=45) plt.tight_layout() plt.show()
false
0
3,089
0
3,089
3,089
129169438
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # -*- coding: utf-8 -*- """ Created on Wed May 10 15:18:50 2023 @author: 2687492Z """ import pandas as pd import numpy as np import tensorflow as tf from tensorflow.keras.layers import ( Input, Conv2D, MaxPooling2D, concatenate, Flatten, Dense, Dropout, AveragePooling2D, GlobalAveragePooling2D, Reshape, Lambda, minimum, ) from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model from tensorflow.keras.optimizers.schedules import ExponentialDecay from tensorflow.keras.callbacks import LearningRateScheduler from tensorflow.keras.layers import Input, DepthwiseConv2D, UpSampling2D from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.layers import Input, Conv2D from tensorflow.keras.callbacks import ReduceLROnPlateau def lambda_1(inputs): subtracted_12, output_3 = inputs return tf.math.square(subtracted_12 / output_3) def lambda_2(inputs): return tf.math.log(inputs) / tf.math.log(10.0) inputs_1 = tf.keras.layers.Input(shape=[1, 1]) output_1 = tf.keras.layers.Dense(32, "relu")(inputs_1) inputs_2 = tf.keras.layers.Input(shape=[1, 1]) output_2 = tf.keras.layers.Dense(32, "relu")(inputs_2) inputs_3 = tf.keras.layers.Input(shape=[1, 1]) output_3 = tf.keras.layers.Dense(32, "relu")(inputs_3) inputs_4 = tf.keras.layers.Input(shape=[1, 1]) output_4 = tf.keras.layers.Dense(32, "relu")(inputs_4) subtracted_12 = tf.keras.layers.subtract([output_1, output_2]) lambda_13 = tf.keras.layers.Lambda(lambda_1)([subtracted_12, output_3]) output_13 = tf.keras.layers.Dense(32, "relu")(lambda_13) minimum_14 = tf.keras.layers.minimum([output_13, output_4]) inputs_5 = tf.keras.layers.Input(shape=[1, 1]) output_5 = tf.keras.layers.Dense(32, "relu")(inputs_5) inputs_6 = tf.keras.layers.Input(shape=[1, 1]) output_6 = tf.keras.layers.Dense(32, "relu")(inputs_6) inputs_7 = tf.keras.layers.Input(shape=[1, 1]) output_7 = tf.keras.layers.Dense(32, "relu")(inputs_7) inputs_8 = tf.keras.layers.Input(shape=[1, 1]) output_8 = tf.keras.layers.Dense(32, "relu")(inputs_8) subtracted_56 = tf.keras.layers.subtract([output_5, output_6]) lambda_57 = tf.keras.layers.Lambda(lambda_1)([subtracted_56, output_7]) output_57 = tf.keras.layers.Dense(32, "relu")(lambda_57) minimum_58 = tf.keras.layers.minimum([output_57, output_8]) output_18 = tf.keras.layers.Dense(32, "relu")( tf.keras.layers.concatenate([minimum_14, minimum_58]) ) inputs_9 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_9 = tf.keras.layers.Lambda(lambda_2)(inputs_9) output_9 = tf.keras.layers.Dense(32, "relu")(lambda_9) inputs_10 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_10 = tf.keras.layers.Lambda(lambda_2)(inputs_10) output_10 = tf.keras.layers.Dense(32, "relu")(lambda_10) add_910 = tf.keras.layers.add([output_9, output_10]) inputs_11 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_11 = 
tf.keras.layers.Lambda(lambda_2)(inputs_11) output_11 = tf.keras.layers.Dense(32, "relu")(lambda_11) output = tf.keras.layers.add([output_18, add_910, output_11]) output = tf.keras.layers.Dense(1, activation="linear")(output) model = tf.keras.Model( inputs=[ inputs_1, inputs_2, inputs_3, inputs_4, inputs_5, inputs_6, inputs_7, inputs_8, inputs_9, inputs_10, inputs_11, ], outputs=output, ) tf.keras.utils.plot_model(model, show_shapes=True, dpi=64) tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
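As a sanity check on the two Lambda transforms used in this model, here is a hypothetical toy model of my own (assuming TensorFlow 2.x; the names below are not from the original script) that wires up the same square((a-b)/c) and log-base-10 operations so they can be verified on concrete numbers:

import numpy as np
import tensorflow as tf

def squared_rel_diff(tensors):   # same computation as lambda_1 above
    diff, scale = tensors
    return tf.math.square(diff / scale)

def log10(x):                    # same computation as lambda_2 above: log base 10
    return tf.math.log(x) / tf.math.log(10.0)

a = tf.keras.Input(shape=(1,))
b = tf.keras.Input(shape=(1,))
c = tf.keras.Input(shape=(1,))
d = tf.keras.Input(shape=(1,))

diff = tf.keras.layers.subtract([a, b])
err = tf.keras.layers.Lambda(squared_rel_diff)([diff, c])
out = tf.keras.layers.add([err, tf.keras.layers.Lambda(log10)(d)])
toy = tf.keras.Model(inputs=[a, b, c, d], outputs=out)

# ((5 - 3) / 2) ** 2 + log10(100) = 1 + 2 = 3
feed = [np.array([[5.0]]), np.array([[3.0]]), np.array([[2.0]]), np.array([[100.0]])]
print(toy.predict(feed))         # expected ~[[3.]]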
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/169/129169438.ipynb
null
null
[{"Id": 129169438, "ScriptId": 38361088, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8394130, "CreationDate": "05/11/2023 13:59:26", "VersionNumber": 1.0, "Title": "NN.py", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 115.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # -*- coding: utf-8 -*- """ Created on Wed May 10 15:18:50 2023 @author: 2687492Z """ import pandas as pd import numpy as np import tensorflow as tf from tensorflow.keras.layers import ( Input, Conv2D, MaxPooling2D, concatenate, Flatten, Dense, Dropout, AveragePooling2D, GlobalAveragePooling2D, Reshape, Lambda, minimum, ) from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model from tensorflow.keras.optimizers.schedules import ExponentialDecay from tensorflow.keras.callbacks import LearningRateScheduler from tensorflow.keras.layers import Input, DepthwiseConv2D, UpSampling2D from tensorflow.keras.applications import MobileNetV2 from tensorflow.keras.layers import Input, Conv2D from tensorflow.keras.callbacks import ReduceLROnPlateau def lambda_1(inputs): subtracted_12, output_3 = inputs return tf.math.square(subtracted_12 / output_3) def lambda_2(inputs): return tf.math.log(inputs) / tf.math.log(10.0) inputs_1 = tf.keras.layers.Input(shape=[1, 1]) output_1 = tf.keras.layers.Dense(32, "relu")(inputs_1) inputs_2 = tf.keras.layers.Input(shape=[1, 1]) output_2 = tf.keras.layers.Dense(32, "relu")(inputs_2) inputs_3 = tf.keras.layers.Input(shape=[1, 1]) output_3 = tf.keras.layers.Dense(32, "relu")(inputs_3) inputs_4 = tf.keras.layers.Input(shape=[1, 1]) output_4 = tf.keras.layers.Dense(32, "relu")(inputs_4) subtracted_12 = tf.keras.layers.subtract([output_1, output_2]) lambda_13 = tf.keras.layers.Lambda(lambda_1)([subtracted_12, output_3]) output_13 = tf.keras.layers.Dense(32, "relu")(lambda_13) minimum_14 = tf.keras.layers.minimum([output_13, output_4]) inputs_5 = tf.keras.layers.Input(shape=[1, 1]) output_5 = tf.keras.layers.Dense(32, "relu")(inputs_5) inputs_6 = tf.keras.layers.Input(shape=[1, 1]) output_6 = tf.keras.layers.Dense(32, "relu")(inputs_6) inputs_7 = tf.keras.layers.Input(shape=[1, 1]) output_7 = tf.keras.layers.Dense(32, "relu")(inputs_7) inputs_8 = tf.keras.layers.Input(shape=[1, 1]) output_8 = tf.keras.layers.Dense(32, "relu")(inputs_8) subtracted_56 = tf.keras.layers.subtract([output_5, output_6]) lambda_57 = tf.keras.layers.Lambda(lambda_1)([subtracted_56, output_7]) output_57 = tf.keras.layers.Dense(32, "relu")(lambda_57) minimum_58 = tf.keras.layers.minimum([output_57, output_8]) output_18 = tf.keras.layers.Dense(32, "relu")( tf.keras.layers.concatenate([minimum_14, minimum_58]) ) inputs_9 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_9 = tf.keras.layers.Lambda(lambda_2)(inputs_9) output_9 = tf.keras.layers.Dense(32, "relu")(lambda_9) inputs_10 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_10 = tf.keras.layers.Lambda(lambda_2)(inputs_10) output_10 = tf.keras.layers.Dense(32, "relu")(lambda_10) add_910 = tf.keras.layers.add([output_9, output_10]) inputs_11 = tf.keras.layers.Input(shape=[1, 1], dtype=tf.float32) lambda_11 = 
tf.keras.layers.Lambda(lambda_2)(inputs_11) output_11 = tf.keras.layers.Dense(32, "relu")(lambda_11) output = tf.keras.layers.add([output_18, add_910, output_11]) output = tf.keras.layers.Dense(1, activation="linear")(output) model = tf.keras.Model( inputs=[ inputs_1, inputs_2, inputs_3, inputs_4, inputs_5, inputs_6, inputs_7, inputs_8, inputs_9, inputs_10, inputs_11, ], outputs=output, ) tf.keras.utils.plot_model(model, show_shapes=True, dpi=64) tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
false
0
1,488
0
1,488
1,488
129169846
<jupyter_start><jupyter_text>net__data_isha Kaggle dataset identifier: net-data-isha <jupyter_script>from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, ) from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras import optimizers from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping from keras.layers import Input, Lambda, Dense, Flatten from keras.models import Model from keras.applications.vgg16 import VGG16 from keras.applications.vgg16 import preprocess_input from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator import numpy as np from glob import glob import matplotlib.pyplot as plt train_path = "/kaggle/input/net-data-isha/Data/train" valid_path = "/kaggle/input/net-data-isha/Data/valid" test_path = "/kaggle/input/net-data-isha/Data/test" # re-size all the images to this IMAGE_SIZE = [224, 224] N_CLASSES = 5 BATCH_SIZE = 32 vgg16 = VGG16(input_shape=IMAGE_SIZE + [3], weights="imagenet", include_top=False) # don't train existing weights for layer in vgg16.layers: layer.trainable = False # useful for getting number of output classes folders = glob("/kaggle/input/net-data-isha/Data/train/*") print(len(folders)) folders # Add the classification layers x = Flatten()(vgg16.output) x = Dense(N_CLASSES, activation="softmax")(x) # Create a new model with the VGG16 base and the new top layers model = Model(inputs=vgg16.input, outputs=x) # Print the model summary model.summary() # compile the model model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) train_datagen = ImageDataGenerator(dtype="float32") train_generator = train_datagen.flow_from_directory( train_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) valid_datagen = ImageDataGenerator(dtype="float32") valid_generator = valid_datagen.flow_from_directory( valid_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) test_datagen = ImageDataGenerator(dtype="float32") test_generator = test_datagen.flow_from_directory( test_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) # Train the model history = model.fit( train_generator, steps_per_epoch=len(train_generator), epochs=56, verbose=1, validation_data=valid_generator, validation_steps=len(valid_generator), ) # Evaluate the model on the test set result = model.evaluate(test_generator) print("Test loss: {:.2f}, Test accuracy: {:.2f}%".format(result[0], result[1] * 100)) # Save the model model.save("finalmodel-VGG16.hdf5") # Load the saved model from tensorflow.keras.models import load_model model = load_model("finalmodel-VGG16.hdf5") # Define the classes classes = ["class1", "class2", "class3", "class4"] # Load the test image and preprocess it from tensorflow.keras.preprocessing.image import load_img, img_to_array img_path = "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/000108 (3).png" img = load_img(img_path, target_size=(224, 224)) x = img_to_array(img) x = x.reshape((1,) + x.shape) x = preprocess_input(x) # Make the prediction preds = model.predict(x) class_idx = np.argmax(preds) class_name = classes[class_idx] print("Predicted class:", class_name) # Save the model model.save("finalmodel-VGG16.hdf5") import tensorflow as tf import numpy as np img_path = "/kaggle/input/chest-ctscan-images/Data/test/normal/12 
(2).png" class_names = list(test_generator.class_indices.keys()) # Load the pre-trained VGG16 model vgg16_model = tf.keras.applications.vgg16.VGG16(weights="imagenet") # Preprocess the image img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224, 224)) img_array = tf.keras.preprocessing.image.img_to_array(img) img_array = tf.keras.applications.vgg16.preprocess_input(img_array) img_array = tf.expand_dims(img_array, 0) # Load the class names class_names = list( test_generator.class_indices.keys() ) # Replace with the actual class names # Make predictions on the input image prediction = model.predict(img_array) # Get the predicted class index predicted_class_index = np.argmax(prediction) # Print the predicted class and confidence predicted_class_name = class_names[predicted_class_index] confidence = 100 * prediction[0][predicted_class_index] print( "This image most likely belongs to {} with a {:.2f} percent confidence.".format( class_names[np.argmax(prediction)], 100 * np.max(prediction) ) ) from tensorflow.keras import optimizers from tensorflow.keras.preprocessing.image import load_img, img_to_array from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, ) from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping from tensorflow.keras.applications import ResNet50, DenseNet201 from tensorflow.keras.applications import resnet, densenet from keras.models import Model import numpy as np import matplotlib.pyplot as plt import cv2 import os import pandas as pd import tensorflow as tf paths = [ "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/", "/kaggle/input/net-data-isha/Data/test/covid19/", "/kaggle/input/net-data-isha/Data/test/large.cell.carcinoma/", "/kaggle/input/net-data-isha/Data/test/normal/", "/kaggle/input/net-data-isha/Data/test/squamous.cell.carcinoma/", ] results = { "adeno": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "covid19": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "large": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "normal": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "squamous": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, } for path, key in zip(paths, results.keys()): for file in os.listdir(path): img = tf.keras.utils.load_img((path + file), target_size=(224, 224)) img_array = tf.keras.utils.img_to_array(img) img_array = tf.expand_dims(img_array, 0) prediction = model.predict(img_array, verbose=0) results[key][np.argmax(prediction)] = ( results.get(key).get(np.argmax(prediction), 0) + 1 ) results df = pd.DataFrame(results) print( "Overall accuracy is : {:.2f}%\n".format( ( df["adeno"][0] + df["covid19"][1] + df["large"][2] + df["normal"][3] + df["squamous"][4] ) / 714 * 100 ) ) print( "Adeno cancer detection accuracy is : {:.2f}%".format( df["adeno"][0] / df["adeno"].sum() * 100 ) ) print( "covid19 cancer detection accuracy is : {:.2f}%".format( df["covid19"][1] / df["covid19"].sum() * 100 ) ) print( "Large cell cancer detection accuracy is : {:.2f}%".format( df["large"][2] / df["large"].sum() * 100 ) ) print( "Normal chest detection accuracy is : {:.2f}%".format( df["normal"][3] / df["normal"].sum() * 100 ) ) print( "Squamous cell cancer detection accuracy is : {:.2f}%".format( df["squamous"][4] / df["squamous"].sum() * 100 ) ) print("\nConfusion Matrix :") df.transpose()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/169/129169846.ipynb
net-data-isha
sheetallamani
[{"Id": 129169846, "ScriptId": 36363335, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12386091, "CreationDate": "05/11/2023 14:02:43", "VersionNumber": 1.0, "Title": "vgg16work2", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 226.0, "LinesInsertedFromPrevious": 226.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184980155, "KernelVersionId": 129169846, "SourceDatasetVersionId": 5200313}]
[{"Id": 5200313, "DatasetId": 3024128, "DatasourceVersionId": 5272580, "CreatorUserId": 12386091, "LicenseName": "Unknown", "CreationDate": "03/20/2023 11:09:22", "VersionNumber": 1.0, "Title": "net__data_isha", "Slug": "net-data-isha", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3024128, "CreatorUserId": 12386091, "OwnerUserId": 12386091.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5200313.0, "CurrentDatasourceVersionId": 5272580.0, "ForumId": 3063369, "Type": 2, "CreationDate": "03/20/2023 11:09:22", "LastActivityDate": "03/20/2023", "TotalViews": 45, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 0}]
[{"Id": 12386091, "UserName": "sheetallamani", "DisplayName": "Sheetal Lamani", "RegisterDate": "11/13/2022", "PerformanceTier": 0}]
from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, ) from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras import optimizers from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping from keras.layers import Input, Lambda, Dense, Flatten from keras.models import Model from keras.applications.vgg16 import VGG16 from keras.applications.vgg16 import preprocess_input from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator import numpy as np from glob import glob import matplotlib.pyplot as plt train_path = "/kaggle/input/net-data-isha/Data/train" valid_path = "/kaggle/input/net-data-isha/Data/valid" test_path = "/kaggle/input/net-data-isha/Data/test" # re-size all the images to this IMAGE_SIZE = [224, 224] N_CLASSES = 5 BATCH_SIZE = 32 vgg16 = VGG16(input_shape=IMAGE_SIZE + [3], weights="imagenet", include_top=False) # don't train existing weights for layer in vgg16.layers: layer.trainable = False # useful for getting number of output classes folders = glob("/kaggle/input/net-data-isha/Data/train/*") print(len(folders)) folders # Add the classification layers x = Flatten()(vgg16.output) x = Dense(N_CLASSES, activation="softmax")(x) # Create a new model with the VGG16 base and the new top layers model = Model(inputs=vgg16.input, outputs=x) # Print the model summary model.summary() # compile the model model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) train_datagen = ImageDataGenerator(dtype="float32") train_generator = train_datagen.flow_from_directory( train_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) valid_datagen = ImageDataGenerator(dtype="float32") valid_generator = valid_datagen.flow_from_directory( valid_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) test_datagen = ImageDataGenerator(dtype="float32") test_generator = test_datagen.flow_from_directory( test_path, batch_size=BATCH_SIZE, target_size=IMAGE_SIZE[:2], class_mode="categorical", ) # Train the model history = model.fit( train_generator, steps_per_epoch=len(train_generator), epochs=56, verbose=1, validation_data=valid_generator, validation_steps=len(valid_generator), ) # Evaluate the model on the test set result = model.evaluate(test_generator) print("Test loss: {:.2f}, Test accuracy: {:.2f}%".format(result[0], result[1] * 100)) # Save the model model.save("finalmodel-VGG16.hdf5") # Load the saved model from tensorflow.keras.models import load_model model = load_model("finalmodel-VGG16.hdf5") # Define the classes classes = ["class1", "class2", "class3", "class4"] # Load the test image and preprocess it from tensorflow.keras.preprocessing.image import load_img, img_to_array img_path = "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/000108 (3).png" img = load_img(img_path, target_size=(224, 224)) x = img_to_array(img) x = x.reshape((1,) + x.shape) x = preprocess_input(x) # Make the prediction preds = model.predict(x) class_idx = np.argmax(preds) class_name = classes[class_idx] print("Predicted class:", class_name) # Save the model model.save("finalmodel-VGG16.hdf5") import tensorflow as tf import numpy as np img_path = "/kaggle/input/chest-ctscan-images/Data/test/normal/12 (2).png" class_names = list(test_generator.class_indices.keys()) # Load the pre-trained VGG16 model 
vgg16_model = tf.keras.applications.vgg16.VGG16(weights="imagenet") # Preprocess the image img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224, 224)) img_array = tf.keras.preprocessing.image.img_to_array(img) img_array = tf.keras.applications.vgg16.preprocess_input(img_array) img_array = tf.expand_dims(img_array, 0) # Load the class names class_names = list( test_generator.class_indices.keys() ) # Replace with the actual class names # Make predictions on the input image prediction = model.predict(img_array) # Get the predicted class index predicted_class_index = np.argmax(prediction) # Print the predicted class and confidence predicted_class_name = class_names[predicted_class_index] confidence = 100 * prediction[0][predicted_class_index] print( "This image most likely belongs to {} with a {:.2f} percent confidence.".format( class_names[np.argmax(prediction)], 100 * np.max(prediction) ) ) from tensorflow.keras import optimizers from tensorflow.keras.preprocessing.image import load_img, img_to_array from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, ) from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping from tensorflow.keras.applications import ResNet50, DenseNet201 from tensorflow.keras.applications import resnet, densenet from keras.models import Model import numpy as np import matplotlib.pyplot as plt import cv2 import os import pandas as pd import tensorflow as tf paths = [ "/kaggle/input/net-data-isha/Data/test/adenocarcinoma/", "/kaggle/input/net-data-isha/Data/test/covid19/", "/kaggle/input/net-data-isha/Data/test/large.cell.carcinoma/", "/kaggle/input/net-data-isha/Data/test/normal/", "/kaggle/input/net-data-isha/Data/test/squamous.cell.carcinoma/", ] results = { "adeno": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "covid19": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "large": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "normal": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, "squamous": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, } for path, key in zip(paths, results.keys()): for file in os.listdir(path): img = tf.keras.utils.load_img((path + file), target_size=(224, 224)) img_array = tf.keras.utils.img_to_array(img) img_array = tf.expand_dims(img_array, 0) prediction = model.predict(img_array, verbose=0) results[key][np.argmax(prediction)] = ( results.get(key).get(np.argmax(prediction), 0) + 1 ) results df = pd.DataFrame(results) print( "Overall accuracy is : {:.2f}%\n".format( ( df["adeno"][0] + df["covid19"][1] + df["large"][2] + df["normal"][3] + df["squamous"][4] ) / 714 * 100 ) ) print( "Adeno cancer detection accuracy is : {:.2f}%".format( df["adeno"][0] / df["adeno"].sum() * 100 ) ) print( "covid19 cancer detection accuracy is : {:.2f}%".format( df["covid19"][1] / df["covid19"].sum() * 100 ) ) print( "Large cell cancer detection accuracy is : {:.2f}%".format( df["large"][2] / df["large"].sum() * 100 ) ) print( "Normal chest detection accuracy is : {:.2f}%".format( df["normal"][3] / df["normal"].sum() * 100 ) ) print( "Squamous cell cancer detection accuracy is : {:.2f}%".format( df["squamous"][4] / df["squamous"].sum() * 100 ) ) print("\nConfusion Matrix :") df.transpose()
false
0
2,401
0
2,427
2,401
129064925
<jupyter_start><jupyter_text>UFO Sightings # Context This dataset contains over 80,000 reports of UFO sightings over the last century. # Content There are two versions of this dataset: scrubbed and complete. The complete data includes entries where the location of the sighting was not found or blank (0.8146%) or have an erroneous or blank time (8.0237%). Since the reports date back to the 20th century, some older data might be obscured. Data contains city, state, time, description, and duration of each sighting. # Inspiration * What areas of the country are most likely to have UFO sightings? * Are there any trends in UFO sightings over time? Do they tend to be clustered or seasonal? * Do clusters of UFO sightings correlate with landmarks, such as airports or government research centers? * What are the most common UFO descriptions? # Acknowledgement This dataset was scraped, geolocated, and time standardized from NUFORC data by Sigmond Axel [here](https://github.com/planetsig/ufo-reports). Kaggle dataset identifier: ufo-sightings <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/ufo-sightings/scrubbed.csv") df.head() df.info() # preprocessing def tim(x): return x.replace("24:00", "23:59") df.datetime = df.datetime.apply(tim) df["datetime"] = pd.to_datetime(df["datetime"]) df.info() df[["duration (seconds)", "duration (hours/min)"]] df.head() df.sort_values("datetime", ascending=False) df["shape"].value_counts() df["shape"].unique() df["shape"].value_counts().head(30) # eda import seaborn as sn, matplotlib.pyplot as plt plt.figure(figsize=(20, 8)) sn.countplot( x=df["shape"], ) plt.xticks(rotation=90) plt.xlabel("Shape of UFO are seen") plt.title("UFO Shape are shown") plt.show() df["year"] = df.datetime.dt.year plt.figure(figsize=(20, 10)) sn.countplot(x=df.year) plt.xticks( rotation=90, ) plt.title( "Total UFO seen in Year", ) plt.show() df["duration (seconds)"] = df["duration (seconds)"].str.replace("[^0-9]", "") df["duration (seconds)"] = df["duration (seconds)"].astype(int) df[["year", "duration (seconds)"]].groupby("year").sum().plot(figsize=(20, 8)) plt.xticks(rotation=90) plt.ylabel("Time (sec)") plt.title("Time Duration show in Year") plt.show() df[["year", "duration (seconds)"]].groupby("year").sum().plot( kind="bar", figsize=(20, 8) ) plt.xticks(rotation=90) plt.title( "Time Duration show in Year", fontdict={"size": 25, "style": "oblique", "font": "times new roman"}, ) plt.ylabel("Time (sec)", fontdict={"size": 20}) plt.xlabel("Year".upper(), fontdict={"size": 20}) plt.show()
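One pitfall in the duration cleaning above: recent pandas releases default Series.str.replace to regex=False, so the "[^0-9]" pattern is only treated as a regular expression when regex=True is passed explicitly. A small self-contained sketch on toy values (not the real column):

import pandas as pd

durations = pd.Series(["300", "2 minutes", "45 sec"])            # toy values for illustration
digits_only = durations.str.replace(r"[^0-9]", "", regex=True)   # keep digits only
print(digits_only.astype(int).tolist())                          # [300, 2, 45]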
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/064/129064925.ipynb
ufo-sightings
null
[{"Id": 129064925, "ScriptId": 38363151, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5939826, "CreationDate": "05/10/2023 17:32:46", "VersionNumber": 1.0, "Title": "notebookd7462f8e03", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184791090, "KernelVersionId": 129064925, "SourceDatasetVersionId": 793053}]
[{"Id": 793053, "DatasetId": 388, "DatasourceVersionId": 814842, "CreatorUserId": 998023, "LicenseName": "Unknown", "CreationDate": "11/13/2019 19:45:57", "VersionNumber": 2.0, "Title": "UFO Sightings", "Slug": "ufo-sightings", "Subtitle": "Reports of unidentified flying object reports in the last century", "Description": "# Context\n\nThis dataset contains over 80,000 reports of UFO sightings over the last century. \n\n# Content\n\nThere are two versions of this dataset: scrubbed and complete. The complete data includes entries where the location of the sighting was not found or blank (0.8146%) or have an erroneous or blank time (8.0237%). Since the reports date back to the 20th century, some older data might be obscured. Data contains city, state, time, description, and duration of each sighting.\n\n# Inspiration\n\n* What areas of the country are most likely to have UFO sightings?\n* Are there any trends in UFO sightings over time? Do they tend to be clustered or seasonal?\n* Do clusters of UFO sightings correlate with landmarks, such as airports or government research centers?\n* What are the most common UFO descriptions? \n\n# Acknowledgement\n\nThis dataset was scraped, geolocated, and time standardized from NUFORC data by Sigmond Axel [here](https://github.com/planetsig/ufo-reports).", "VersionNotes": "Fix data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 388, "CreatorUserId": 270995, "OwnerUserId": NaN, "OwnerOrganizationId": 222.0, "CurrentDatasetVersionId": 793053.0, "CurrentDatasourceVersionId": 814842.0, "ForumId": 1968, "Type": 2, "CreationDate": "11/17/2016 03:50:44", "LastActivityDate": "02/06/2018", "TotalViews": 248610, "TotalDownloads": 35131, "TotalVotes": 620, "TotalKernels": 194}]
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/ufo-sightings/scrubbed.csv") df.head() df.info() # preprocessing def tim(x): return x.replace("24:00", "23:59") df.datetime = df.datetime.apply(tim) df["datetime"] = pd.to_datetime(df["datetime"]) df.info() df[["duration (seconds)", "duration (hours/min)"]] df.head() df.sort_values("datetime", ascending=False) df["shape"].value_counts() df["shape"].unique() df["shape"].value_counts().head(30) # eda import seaborn as sn, matplotlib.pyplot as plt plt.figure(figsize=(20, 8)) sn.countplot( x=df["shape"], ) plt.xticks(rotation=90) plt.xlabel("Shape of UFO are seen") plt.title("UFO Shape are shown") plt.show() df["year"] = df.datetime.dt.year plt.figure(figsize=(20, 10)) sn.countplot(x=df.year) plt.xticks( rotation=90, ) plt.title( "Total UFO seen in Year", ) plt.show() df["duration (seconds)"] = df["duration (seconds)"].str.replace("[^0-9]", "") df["duration (seconds)"] = df["duration (seconds)"].astype(int) df[["year", "duration (seconds)"]].groupby("year").sum().plot(figsize=(20, 8)) plt.xticks(rotation=90) plt.ylabel("Time (sec)") plt.title("Time Duration show in Year") plt.show() df[["year", "duration (seconds)"]].groupby("year").sum().plot( kind="bar", figsize=(20, 8) ) plt.xticks(rotation=90) plt.title( "Time Duration show in Year", fontdict={"size": 25, "style": "oblique", "font": "times new roman"}, ) plt.ylabel("Time (sec)", fontdict={"size": 20}) plt.xlabel("Year".upper(), fontdict={"size": 20}) plt.show()
false
0
741
0
1,029
741
129064415
<jupyter_start><jupyter_text>lendingclub Kaggle dataset identifier: lendingclub <jupyter_script># # # LENDING CLUB CASE STUDY # ### Problem Statement # # The company is the largest online loan marketplace, facilitating personal loans, business loans, and financing of medical procedures. # Lending loans to ‘risky’ applicants is the largest source of financial loss (called credit loss). Credit loss is the amount of money lost by the lender when the borrower refuses to pay or runs away with the money owed. In other words, borrowers who default cause the largest amount of loss to the lenders. In this case, the customers labelled as 'charged-off' are the 'defaulters'. # # **Identification of such applicants using EDA is the aim of this case study.** # # The company wants to understand the driving factors behind loan default, i.e. the variables which are strong indicators of default. The company can utilise this knowledge for its portfolio and risk assessment. # # # # ### Import the libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") # ### Read the dataset and check the first five rows loan_df = pd.read_csv("/kaggle/input/lendingclub/loan.csv") loan_df.head() # ### Check the shape of the dataframe print(loan_df.shape) loan_df.info(max_cols=111) # ## Data Cleaning and Manipulation # Data quality issues can be: # - Missing Values # - We can drop the columns containing mostly null values and also drop columns which do not add any value to the analysis. # - Sanity Checks # - We can check for data inconsistencies # - A small % of missing values can be imputed. # - Categorical columns can be imputed with the mode; continuous columns, where suitable, with the mean/median # - Modifying the Data Types: # - Converting some string columns to numeric variables in order to perform further analysis. # - Outlier treatment is necessary for correct analysis # ## Data Understanding # - The aim of the analysis of this loan dataset is to find out the factors influencing a loan default. # - The dataset has the details of past loan applicants and whether they resulted in a good loan or a bad loan. # - Loan Status is the Target variable. # - Column Loan_status has 3 values: # >'Fully Paid' # > 'Charged Off' # > 'Current' # Since we are not sure whether 'Current' customers will default or fully pay at the end of the tenure, we will filter out 'Current' customers and will only consider 'Fully Paid' and 'Charged Off' values. Here **charged off means defaulters** # - Many columns which have NA values need to be removed # **Missing Value Treatment** # Calculate missing value % df_null_percent = loan_df.isnull().sum() / len(loan_df) * 100 df_null_percent.sort_values(ascending=False) # Filter out columns with 50% or more missing values, which is our threshold to drop the columns loan_df = loan_df.loc[:, df_null_percent < 50] # print shape of the dataframe loan_df.shape loan_df.info(max_cols=54) # **The customer behavior variables are not available at the time of # loan application, and thus they cannot be used as predictors for # credit approval** # List the columns which are not required for the analysis.
Total 21 columns list_drop_col1 = [ "delinq_2yrs", "earliest_cr_line", "inq_last_6mths", "open_acc", "pub_rec", "revol_bal", "revol_util", "total_acc", "out_prncp", "out_prncp_inv", "total_pymnt", "total_pymnt_inv", "total_rec_prncp", "total_rec_int", "total_rec_late_fee", "recoveries", "collection_recovery_fee", "last_pymnt_d", "last_pymnt_amnt", "last_credit_pull_d", "application_type", ] # Drop the coumns which are not required for the analysis. Total 21 columns loan_df.drop(list_drop_col1, axis=1, inplace=True) # Observer the shape after dropping the columns loan_df.shape # **Drop columns with single value as these will not be helpful in the analysis** # Observe columns having single value num_unique = ( loan_df.nunique() ) # This function is a method used to return the number of unique elements in a pandas object. num_unique.sort_values() loan_df.chargeoff_within_12_mths.unique() # print columns with single value list_col_unique_val = num_unique[num_unique.values == 1] print(list_col_unique_val) # List of Columns to be dropped list_col_unique_val.index.tolist() # Drop columns having single value list_drop_col2 = list_col_unique_val.index.tolist() loan_df.drop(list_drop_col2, axis=1, inplace=True) loan_df.shape # **Drop columns which does not add any value to analysis.** # - id # -member_id # -url # -desc # -zip_code # drop columns which does not add any value to analysis and observe the shape list_drop_col3 = ["id", "member_id", "url", "desc", "zip_code"] loan_df.drop(list_drop_col3, axis=1, inplace=True) loan_df.shape # **Taking loan_status equal to "Charged Off" and "Fully Paid" as if the loan_status is "Current" we might not know whether they will turn in fully paid or will they be charged off** # observe the count of loan_status values loan_df.loan_status.value_counts() # consider subset with values "Fully Paid" and "Charged Off" and observe the shape loan_df = loan_df[loan_df.loan_status.isin(["Fully Paid", "Charged Off"])] loan_df.shape # # - Check for rows with more than 50% of missing values and drop them # print rows with null values in descending order loan_df.isnull().sum(axis=1).sort_values(ascending=False) # calculate percentage of null values across rows df_missing_val_row = loan_df.isnull().sum(axis=1) / loan_df.shape[1] * 100 print(df_missing_val_row.sort_values(ascending=False)) # Identify rows with missing values greater than or equal to 50% df_missing_val_row[df_missing_val_row >= 50] # #### Observation: # # There are no rows with missing values >=50% Hence no need to drop any rows # **Sanity Checks** # # drop rows if "funded_amnt_inv" is greater than "loan_amnt" loan_df[loan_df.funded_amnt_inv > loan_df.loan_amnt] loan_df.info(max_cols=20) # **Analysis:** There are no rows with "funded_amnt_inv" is greater than "loan_amnt". # **Missing Value Imputation** # Observe columns with null values null_val = loan_df.isnull().sum() * 100 / loan_df.shape[0] null_val[null_val.values > 0] # Treating missing values for emp_title loan_df["emp_title"].value_counts() # calculate mode loan_df["emp_title"].mode() # **Analysis: We Can replace missing values of "emp_title" with mode, but it won't create much sense so will go ahead without the imputation** # Drop column emp_title loan_df.drop(["emp_title"], axis=1, inplace=True) loan_df.shape # Treating missing values for emp_length loan_df["emp_length"].value_counts() # **Analysis: After observing the values of emp_length it seems that absence of value means the employee is not working for any firm but must be running a business. 
Hence we can impute the missing value by "Self Employed".** # # Replace null with "Self Employed" loan_df["emp_length"] = loan_df["emp_length"].fillna("Self Employed") loan_df["emp_length"].isnull().sum() # Observe values for title column loan_df.title.value_counts() # Drop column title as it has lot of different values so wont be useful loan_df.drop(["title"], axis=1, inplace=True) loan_df.shape # Treating missing values for pub_rec_bankruptcies loan_df.pub_rec_bankruptcies.value_counts() # Filter rows with nonnull values of pub_rec_bankruptcies loan_df = loan_df[~loan_df.pub_rec_bankruptcies.isnull()] loan_df.shape # ### Handling Data Types # Observe the data loan_df.head() # Observe data types loan_df.dtypes # Convert "funded_amnt_inv" to int64 print(loan_df.funded_amnt_inv.dtype) loan_df.funded_amnt_inv = loan_df.funded_amnt_inv.astype("int64") print(loan_df.funded_amnt_inv.dtype) # Convert "annual_inc" to int64 print(loan_df.annual_inc.dtype) loan_df.annual_inc = loan_df.annual_inc.astype("int64") print(loan_df.annual_inc.dtype) # Convert "pub_rec_bankruptcies" to int64 print(loan_df.pub_rec_bankruptcies.dtype) loan_df.pub_rec_bankruptcies = loan_df.pub_rec_bankruptcies.astype("int64") print(loan_df.pub_rec_bankruptcies.dtype) # Convert "issue_d" to Datetime print(loan_df.issue_d) loan_df.issue_d = pd.to_datetime(loan_df.issue_d, format="%b-%y") print(loan_df.issue_d) print(loan_df.issue_d.dtype) # Convert "term" to "int64" after removing " months" string print(loan_df.term) loan_df.term = loan_df.term.str.replace(" months", "").astype("int64") print(loan_df.term) print(loan_df.term.dtype) # Convert "int_rate" to "float64" after removing "%" symbol. print(loan_df.int_rate) loan_df.int_rate = loan_df.int_rate.str.replace("%", "").astype("float64") print(loan_df.int_rate) # Observe values of emp_length and Remove "+". 
loan_df.emp_length.value_counts() # remove '+' loan_df.emp_length = loan_df.emp_length.str.replace("+", "") loan_df.emp_length.value_counts() # # Treating Outliers # Observe basic statistical details about the dataset loan_df.annual_inc.describe() # **Remove outliers based on annual_inc** # Plot boxplot to observe the outliers loan_df.annual_inc.plot.box(fontsize=12, figsize=(8, 8)).set(title="Annual income") plt.show() # Observe Quantiles quantiles = loan_df.annual_inc.quantile([0.01, 0.25, 0.5, 0.75, 0.99]) print(quantiles) # Remove outliers df_final = loan_df[(loan_df.annual_inc > 14400) & (loan_df.annual_inc < 234144)] # Plot boxplot from new dataset after removing obvious outliers df_final.annual_inc.plot.box().set(title="Annual income") plt.show() # Final dataset after data cleaning and data handling df_final.head() # ## Data Analysis # **Derived metrics for Month and Year from column "issue_d"** # Derived metrics analysis on column "issue_d" df_final["year"] = df_final.issue_d.dt.year df_final["month"] = df_final.issue_d.dt.month df_final[["issue_d", "month", "year"]].head() # Create new column loan_status_code with 0 and 1 values based on the loan_status column, where 1="Charged Off" and 0="Fully Paid" loan_status_map = {"Charged Off": 1, "Fully Paid": 0} df_final["loan_status_code"] = df_final["loan_status"].map(loan_status_map) print(df_final["loan_status_code"]) df_final.loan_status.value_counts() # Creating different groups for interest rate # int_rate is between 5% and 25%, grouping values accordingly df_final.int_rate.describe() # Binning int_rate bins = [5, 9, 13, 17, 21, 25] labels = ["5%-9%", "9%-13%", "13%-17%", "17%-21%", "21%-25%"] df_final["int_rate_group"] = pd.cut(df_final["int_rate"], bins=bins, labels=labels) df_final["int_rate_group"].value_counts() # Binning annual_inc bins = [14400, 25000, 50000, 100000, 150000, 234000] labels = ["14k-25k", "25k-50k", "50k-100k", "100k-150k", "150k-250k"] df_final["annual_inc_group"] = pd.cut(df_final["annual_inc"], bins=bins, labels=labels) df_final["annual_inc_group"].value_counts() # # Univariate Analysis # (P.S.
- The plot function reference is taken from a kaggle notebook on EDA) # define a function to attach values with each bar def label(ax, x): """ Attach a text label above each bar displaying its height """ for p in ax.patches: ax.annotate("{:1}".format(p.get_height()), (p.get_x() + x, p.get_height() + 10)) # define function to plot countplot for categorical variables def cat(df, col_name): fig, ax = plt.subplots(figsize=(10, 6), dpi=100) sns.countplot( x=col_name, data=df_final[df_final.loan_status == "Charged Off"], order=df[col_name].value_counts().index, ) ax.set_xlabel(col_name) ax.set_ylabel("No of loans") ax.set_title("Plot of " + col_name, weight="bold") plt.xticks(rotation=90) label(ax, 0.01) plt.show() # define function to plot countplot for numerical variables def num(df, col_name): fig, ax = plt.subplots(figsize=(10, 6), dpi=100) plt.figure(figsize=(16, 6)) plt.subplot(1, 2, 1) sns.distplot(a=df[col_name], rug=True, color="#388E3C") plt.subplot(1, 2, 2) sns.boxplot(data=df[col_name], color="#388E3C") plt.suptitle("Distribution of " + col_name) label(ax, 0.01) plt.show() # **grade, sub_grade, term, emp_length, issue_y, issue_m** # countplot for "grade", "sub_grade","term","emp_length","issue_y","issue_m" for Charged off for factor in ["grade", "sub_grade", "term", "emp_length", "year", "month"]: cat(df_final, factor) # **Observation** # - Count Plot of **Grade** shows Grade B ,C, D are given more likely to charged off as compared to other grades # - Count Plot of **Sub Grade** shows Grade B3, B5, B4, C1 , C2 have charged off more as compared to other grades # - Count Plot of **term** shows 36 months loans are issued more so defaulted more compared to 60 months loan # - Count Plot of **emp_length** shows employees with 10 years have more defaulted on loan compared with lesser experience # - Count Plot of issue **year** shows the no. of defaulted loans have increased in the year 2011. The trend is increasing with the increase in the year # - Count Plot of issue **month** shows there is increasing trend in number of defaults with increase in the months. More defaults can be seen in the month of October, November, December. # **loan_status** sns.countplot(x="loan_status", data=df_final) # **Observation** # - This shows that 14% of total loans are charged off. 
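The "14% of total loans are charged off" figure, and the per-group default percentages discussed below, are group-wise means of the loan_status_code flag built earlier (1 for Charged Off). A minimal sketch on a toy frame (illustrative numbers of my own, not the real data):

import pandas as pd

toy = pd.DataFrame(
    {
        "loan_status": ["Fully Paid", "Fully Paid", "Charged Off", "Fully Paid", "Charged Off", "Fully Paid"],
        "int_rate_group": ["5%-9%", "5%-9%", "13%-17%", "13%-17%", "21%-25%", "21%-25%"],
        "loan_status_code": [0, 0, 1, 0, 1, 0],
    }
)
print(toy["loan_status"].value_counts(normalize=True) * 100)            # overall share of each status
print(toy.groupby("int_rate_group")["loan_status_code"].mean() * 100)   # default % within each bucket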
# ### addr_state, purpose, home_ownership # Plot countplot for "addr_state", "purpose","home_ownership" for factor in ["addr_state", "purpose", "home_ownership"]: cat(df_final, factor) # ### Observation # - States CA, NY, FL and TX are the states for which maximum loan defaults # - Maximum loan defaults are for debt consolidation, paying off Credit card, small business and 'other' reasons # - Education and renewable energy is the least category where loans are defaulted # - People who are in Rented house or Mortgate are more likely to Default # **funded_amnt, installment, amnt_to_inc_ratio** # Plot distplot for "funded_amnt","installment","amnt_to_inc_ratio" sns.distplot(a=df_final["funded_amnt"], rug=True, color="#388E3C") plt.suptitle("Distribution of funded_amnt") sns.distplot(a=df_final["installment"], rug=True, color="#388E3C") plt.suptitle("Distribution of installment") # ### Observation # - Funded amount is ranging from 5000 to 15000 USD # - Installment amount is ranging from 200 to 400 USD # **Segmented Univariate Analysis** # - int_rate_group # Countplot of int_rate_group fig, ax = plt.subplots(figsize=(6, 4), dpi=100) sns.countplot(x="int_rate_group", data=df_final) ax.set_xlabel("int_rate_group") ax.set_ylabel("No of loans") ax.set_title("int_rate_group", weight="bold") label(ax, 0.20) plt.show() # **Observation** # - Interest rate range 9 to 13 is the range where maximum loans have been issued # - 21 - 25% is the range where minimum loans have been issued # # Observing the same above graph for charged off # Countplot of int_rate_group for charged off fig, ax = plt.subplots(figsize=(6, 4), dpi=100) sns.countplot(x="int_rate_group", data=df_final[df_final.loan_status == "Charged Off"]) ax.set_xlabel("int_rate_group") ax.set_ylabel("No of loans") ax.set_title("int_rate_group", weight="bold") label(ax, 0.20) plt.show() # **Observation** # . Interest rate range 13% - 17% is the range with maximum loan defaults # - 21 - 25% is the range where minimum loan defaults can be observed from the above plot # **But considering above 2 plots we come to a conclusion that** # > If we notice the % of defaults in each groups we can state that the interest range 21%-25% has maximum chances of defaults (as high as 44%) # P.S. (Percent here means no. of charged off divided by the total no. of loan issues in a particular interest rate group) # ## Summary of univariate analaysis # - Count Plot of **Grade** shows Grade B ,C, D are given more likely to charged off as compared to other grades # - Count Plot of **Sub Grade** shows Grade B3, B5, B4, C1 , C2 have charged off more as compared to other grades # - Count Plot of **term** shows 36 months loans are issued more so defaulted more compared to 60 months loan # - Count Plot of **emp_length** shows employees with 10 years have more defaulted on loan compared with lesser experience # - Count Plot of issue **year** shows the no. of defaulted loans have increased in the year 2011. The trend is increasing with the increase in the year # - Count Plot of issue **month** shows there is increasing trend in number of defaults with increase in the months. More defaults can be seen in the month of October, November, December. 
# - States CA, NY, FL and TX are the states for which maximum loan defaults # - Maximum loan defaults are for debt consolidation, paying off Credit card, small business and 'other' reasons # - Education and renewable energy is the least category where loans are defaulted # - People who are in Rented house or Mortgate are more likely to Default- Funded amount is ranging from 5000 to 15000 USD # - Installment amount is ranging from 200 to 400 USD # - Interest rate range 9 to 13 is the range where maximum loans have been issued # - 21 - 25% is the range where minimum loans have been issued # **Observation** # . Interest rate range 13% - 17% is the range with maximum loan defaults # - 21 - 25% is the range where minimum loan defaults can be observed from the above plot # **But considering above 2 plots we come to a conclusion that** # > If we notice the % of defaults in each groups we can state that the interest range 21%-25% has maximum chances of defaults (as high as 44%) # P.S. (Percent here means no. of charged off divided by the total no. of loan issues in a particular interest rate group) # # Bivariate Analysis # **Grade vs Loan Staus** # Countplot of Grade vs Loan Status fig, ax = plt.subplots(figsize=(12, 6), dpi=100) sns.countplot(x="grade", hue="loan_status", data=df_final, palette="Set2") ax.set_xlabel("Grade") ax.set_ylabel("No of loans") label(ax, 0.1) ax.set_title("Grade vs Loan Status") plt.show() # **Observation** # - The counts of Grade B, C and D are highest in Charged Off # **Sub Grade vs Loan Staus** # Countplot of Sub Grade vs Loan Status fig, ax = plt.subplots(figsize=(12, 6), dpi=100) sns.countplot(x="sub_grade", hue="loan_status", data=df_final) ax.set_xlabel("Sub Grade") ax.set_ylabel("No of loans") ax.set_title("Sub Grade vs Loan Status") plt.show() # **Observation** # - The counts of B3,B4,B5, C1,C2, D3 sub grades are higher in Charged Off # **Term vs Loan Staus** # Countplot of Term vs Loan Status fig, ax = plt.subplots(figsize=(5, 4), dpi=100) sns.countplot(x="term", hue="loan_status", data=df_final) ax.set_xlabel("Term") ax.set_ylabel("No of loans") label(ax, 0.30) ax.set_title("Term vs Loan Status(Charged off)") # **Observation** # - Though 36 month loan default is more compared to 60 month # - What is observed is if we see the % of charged off in 36 months it would be 10% of the total loans of 36 months tenure. # - Also if we calculate % charged off in 60 months it is 25% of the total loans issued with a 60 months which is much higher as compared to 36 month tenure so it is more likely for a 60 months loan issued to be a default # **Employment Length vs Loan Staus** # Countplot of Employment Length vs Loan Status fig, ax = plt.subplots(figsize=(12, 6), dpi=100) sns.countplot(x="emp_length", hue="loan_status", data=df_final, palette="Set2") ax.set_xlabel("Employment length") ax.set_ylabel("No of loans") ax.set_title("Employment length vs Loan Status") label(ax, 0.001) # **Observation** # - Maximum loans are issued for people having 10 years of emp_lenth and hence the no. 
of defaulters are also high # **Loan Issue Year vs Loan Status** # Countplot of Loan Issue Year vs Loan Status fig, ax = plt.subplots(figsize=(8, 6), dpi=100) plt.xticks(rotation=90) sns.countplot(x="year", data=df_final, hue="loan_status", palette="Set2") ax.set_xlabel("Loan Issue Year") ax.set_ylabel("No of loans") label(ax, 0.01) ax.set_title("Loan Issue Year (fully paid and charged off)") plt.show() # ### Observation # - Plot of loan issue year shows maximum loans were taken in the year 2011 # - Also high loans are being Charged Off in 2011 # ### Loan Issue Month vs Loan Status # Countplot of Loan Issue Month vs Loan Status fig, ax = plt.subplots(figsize=(8, 6), dpi=100) plt.xticks(rotation=90) sns.countplot(x="month", data=df_final, hue="loan_status", palette="Set2") ax.set_xlabel("Loan Issue Month") ax.set_ylabel("No of loans") label(ax, 0.01) ax.set_title("Loan Issue Month (fully paid and charged off)") plt.show() # **Observation** # - high loans are Charged Off for the loans issued in Sep - Dec months # **Purpose vs Loan Staus** # Countplot of Purpose vs Loan Status fig, ax = plt.subplots(figsize=(12, 6), dpi=100) plt.xticks(rotation=90) sns.countplot( x="purpose", hue="loan_status", data=df_final, palette="Set2", order=df_final[df_final.loan_status == "Charged Off"].purpose.value_counts().index, ) ax.set_xlabel("Loan purpose") ax.set_ylabel("No of loans") ax.set_title("Loan purpose vs Loan Status") label(ax, 0.001) plt.show() # **Observation** # - Loans with purpose debt consolidation, other, credit crd and home improvement categories are unable to pay the loan compared with education / renewable energy # - Also debt consolidation is the category where maximum loans are given. # **Home Ownership vs Loan Staus** # Hist of Home Ownership vs Loan Status fig, ax = plt.subplots(figsize=(8, 6), dpi=100) sns.histplot( data=df_final, x="home_ownership", hue="loan_status", multiple="dodge", shrink=0.8, palette="Set2", ) ax.set_xlabel("Home ownership") ax.set_ylabel("No of loans") ax.set_title("Home ownership vs Loan Status") label(ax, 0.01) # **Observation** # - Rent and Mortage category people take maximum loans and have higher chances of being defaulters of the loan compared with people in Own house # **Verification vs Loan Staus** # Histplot of Verification vs Loan Status fig, ax = plt.subplots(figsize=(6, 4), dpi=100) sns.histplot( data=df_final, x="verification_status", hue="loan_status", multiple="dodge", shrink=0.8, palette="Set2", ) ax.set_xlabel("Verification status") ax.set_ylabel("No of loans") ax.set_title("Verfification status vs Loan Status") label(ax, 0.01) plt.show() # **Observation** # - Verified loans which are Charged Off is more compared to Not Verified # **State vs Loan Staus** # Countplot of State vs Loan Status fig, ax = plt.subplots(figsize=(16, 8), dpi=100) sns.countplot(x="addr_state", hue="loan_status", data=df_final) ax.set_xlabel("State") ax.set_ylabel("No of loans") ax.set_title("State vs Loan Status = Charged Off") plt.show() # **Observation** # - Borrowers from states CA, FL, NY and NJ have failed to pay the loan # **Grade vs Loan status** # Boxplot Grade vs Loan status plt.figure(figsize=(12, 8)) ax = sns.boxplot( y="grade", x="loan_amnt", data=df_final[df_final.loan_status == "Charged Off"], palette="rainbow", ) ax.set_title("Grade vs Loan Amount", fontsize=15, color="b") ax.set_ylabel("Grade", fontsize=14, color="b") ax.set_xlabel("Loan Amount", fontsize=14, color="b") plt.show() # **Observation** # - Grade F, G and E are the three category which has 
the highest charged-off loan amounts # - The median loan amount of F and G is around 20k with Q3 at 25k # - Grade A has the lowest median, at about 7.5k # **DTI vs Loan status** plt.figure(figsize=(12, 8)) ax = sns.boxplot(y="dti", x="loan_status", data=df_final, palette="rainbow") ax.set_title("DTI for both Fully Paid and Charged off loans", fontsize=15, color="b") ax.set_ylabel("DTI spread", fontsize=14, color="b") ax.set_xlabel("Loan status", fontsize=14, color="b") plt.show() # **Observation** # - DTI is not a significant factor because the medians for Fully Paid and Charged Off are close to each other # **amnt_to_inc_ratio vs Loan status** # df_final["amnt_to_inc_ratio"] = df_final.loan_amnt / df_final.annual_inc df_final[["loan_amnt", "annual_inc", "amnt_to_inc_ratio"]] plt.figure(figsize=(12, 8)) ax = sns.boxplot( y="amnt_to_inc_ratio", x="loan_status", data=df_final, palette="rainbow" ) ax.set_title( "amnt_to_inc_ratio for both Fully Paid and Charged off loans", fontsize=15, color="b", ) ax.set_ylabel("amnt_to_inc_ratio spread", fontsize=14, color="b") ax.set_xlabel("Loan status", fontsize=14, color="b") plt.show() # **Observation** # - amnt_to_inc_ratio is an indicator of bad loans, as its median and Q3 are higher for charged-off loans # **Summary** # >- The plots above are for charged-off loans. An applicant is more likely to default under the following conditions: # # >- The term matters: although more 36-month loans default in absolute numbers, only about 10% of all 36-month loans are charged off # >- About 25% of all 60-month loans are charged off, so a 60-month loan is considerably more likely to end in default # >- Loans taken for debt consolidation, credit card, home improvement and 'other' purposes default more often than loans for education or renewable energy # >- Debt consolidation is also the category in which the most loans are issued # >- Borrowers who rent or have a mortgage take the most loans and default more often than borrowers who own their house # >- More Verified loans are charged off than Not Verified loans # >- Borrowers from the states CA, FL, NY and NJ default most often # >- Grades F, G and E are the three categories with the highest charged-off loan amounts # >- The median loan amount of F and G is around 20k with Q3 at 25k # >- Grade A has the lowest median, at about 7.5k # >- DTI is not a significant factor because the medians for Fully Paid and Charged Off are close to each other # >- amnt_to_inc_ratio is an indicator of bad loans, as its median and Q3 are higher for charged-off loans # To identify correlation between all variables of the dataset and see which variables are negatively impacting the loan_status variable f, ax = plt.subplots(figsize=(11, 9)) corr = df_final.corr() sns.heatmap(corr, vmax=0.3, annot=True)
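# The percentages quoted in the summary above (roughly 10% vs 25% charged off by term, and up to about 44% claimed for the 21%-25% interest band) can be computed directly rather than read off the plots. The sketch below is an illustrative addition, not part of the original notebook, and reuses the term, int_rate_group and loan_status_code columns created earlier; note also that df_final.corr() in the cell above only includes numeric columns, so categorical drivers such as grade or purpose do not appear in the heatmap.
# Illustrative check of the summary figures (assumes df_final from the cells above)
def charged_off_rate(df, by):
    # Share of loans with loan_status_code == 1 (Charged Off) within each group, in percent
    return df.groupby(by)["loan_status_code"].mean().mul(100).round(1)

print(charged_off_rate(df_final, "term"))            # per-term default rate
print(charged_off_rate(df_final, "int_rate_group"))  # per interest-rate-band default rate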
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/064/129064415.ipynb
lendingclub
shrinivasbhat
[{"Id": 129064415, "ScriptId": 38366452, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13160802, "CreationDate": "05/10/2023 17:27:46", "VersionNumber": 1.0, "Title": "Lending_Club_Case_Study", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 796.0, "LinesInsertedFromPrevious": 796.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 184790199, "KernelVersionId": 129064415, "SourceDatasetVersionId": 4939533}]
[{"Id": 4939533, "DatasetId": 2864339, "DatasourceVersionId": 5007603, "CreatorUserId": 11454177, "LicenseName": "Unknown", "CreationDate": "02/03/2023 03:50:46", "VersionNumber": 1.0, "Title": "lendingclub", "Slug": "lendingclub", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2864339, "CreatorUserId": 11454177, "OwnerUserId": 11454177.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4939533.0, "CurrentDatasourceVersionId": 5007603.0, "ForumId": 2900483, "Type": 2, "CreationDate": "02/03/2023 03:50:46", "LastActivityDate": "02/03/2023", "TotalViews": 123, "TotalDownloads": 4, "TotalVotes": 1, "TotalKernels": 5}]
[{"Id": 11454177, "UserName": "shrinivasbhat", "DisplayName": "Shrinivas Bhat", "RegisterDate": "09/01/2022", "PerformanceTier": 0}]
false
1
8,611
1
8,630
8,611
129187974
<jupyter_start><jupyter_text>Data Science Salaries 2023 💸 Data Science Job Salaries Dataset contains 11 columns, each are: 1. work_year: The year the salary was paid. 2. experience_level: The experience level in the job during the year 3. employment_type: The type of employment for the role 4. job_title: The role worked in during the year. 5. salary: The total gross salary amount paid. 6. salary_currency: The currency of the salary paid as an ISO 4217 currency code. 7. salaryinusd: The salary in USD 8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code. 9. remote_ratio: The overall amount of work done remotely 10. company_location: The country of the employer's main office or contracting branch 11. company_size: The median number of people that worked for the company during the year Kaggle dataset identifier: data-science-salaries-2023 <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ![image.png](attachment:b90b38e2-55a6-4f1e-931d-db608f561fb9.png)! import pandas as pd import numpy as np df = pd.read_csv("/kaggle/input/data-science-salaries-2023/ds_salaries.csv") df.head() df.tail() df.sample(5) df.info() df.describe() df.shape df.isnull().count() # ## EDA Exploratory Data Analysis import matplotlib.pyplot as plt import seaborn as sns # ## Plotting # ## Univariant Analysis df.columns # plotting matplotlib plt.plot(df["work_year"]) plt.show() # ## Plotting seaborn Displots sns.displot(df["work_year"]) sns.displot(df["experience_level"]) sns.displot(df["employment_type"]) sns.displot(df["job_title"]) sns.displot(df["salary"]) sns.displot(df["salary_currency"]) sns.displot(df["salary_in_usd"]) sns.displot(df["employee_residence"]) sns.displot(df["remote_ratio"]) sns.displot(df["company_location"]) sns.displot(df["company_size"]) # ## 2D Bivariant(2 variable Analysis) plt.plot("work_year", "experience_level", data=df) df.columns sns.displot(data=df, x="work_year", hue="experience_level") df.corr() # Calculate the correlation matrix sns.pairplot(df) # Create pairwise scatter plots for multiple numerical columns # ## Multi Variant Analysis sns.pairplot(df) plt.show() df.corr() # Calculate the correlation matrix for all numerical columns sns.heatmap( df.corr(), annot=True, cmap="coolwarm" ) # Visualize the correlation matrix as a heatmap
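# The univariate plots above treat each column separately, and df.corr() can only use the numeric columns (such as work_year, salary, salary_in_usd and remote_ratio). As a possible next step, the illustrative sketch below (not part of the original notebook) aggregates the USD salary by experience level and year using the same df.
# Illustrative grouped summary: median USD salary by experience level and work year
salary_summary = (
    df.groupby(["experience_level", "work_year"])["salary_in_usd"]
    .median()
    .unstack("work_year")
    .round(0)
)
print(salary_summary)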
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/187/129187974.ipynb
data-science-salaries-2023
arnabchaki
[{"Id": 129187974, "ScriptId": 38403855, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11919395, "CreationDate": "05/11/2023 16:39:02", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 113.0, "LinesInsertedFromPrevious": 113.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
[{"Id": 185012738, "KernelVersionId": 129187974, "SourceDatasetVersionId": 5392837}]
[{"Id": 5392837, "DatasetId": 3125926, "DatasourceVersionId": 5466555, "CreatorUserId": 7428813, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "04/13/2023 09:55:16", "VersionNumber": 1.0, "Title": "Data Science Salaries 2023 \ud83d\udcb8", "Slug": "data-science-salaries-2023", "Subtitle": "Salaries of Different Data Science Fields in the Data Science Domain", "Description": "Data Science Job Salaries Dataset contains 11 columns, each are:\n\n1. work_year: The year the salary was paid.\n2. experience_level: The experience level in the job during the year\n3. employment_type: The type of employment for the role\n4. job_title: The role worked in during the year.\n5. salary: The total gross salary amount paid.\n6. salary_currency: The currency of the salary paid as an ISO 4217 currency code.\n7. salaryinusd: The salary in USD\n8. employee_residence: Employee's primary country of residence in during the work year as an ISO 3166 country code.\n9. remote_ratio: The overall amount of work done remotely\n10. company_location: The country of the employer's main office or contracting branch\n11. company_size: The median number of people that worked for the company during the year", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3125926, "CreatorUserId": 7428813, "OwnerUserId": 7428813.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5392837.0, "CurrentDatasourceVersionId": 5466555.0, "ForumId": 3189506, "Type": 2, "CreationDate": "04/13/2023 09:55:16", "LastActivityDate": "04/13/2023", "TotalViews": 234449, "TotalDownloads": 44330, "TotalVotes": 1244, "TotalKernels": 184}]
[{"Id": 7428813, "UserName": "arnabchaki", "DisplayName": "randomarnab", "RegisterDate": "05/16/2021", "PerformanceTier": 2}]
false
1
667
4
916
667
129187256
<jupyter_start><jupyter_text>Diabetes prediction dataset The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes. Kaggle dataset identifier: diabetes-prediction-dataset <jupyter_script>import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from xgboost import XGBClassifier from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import ( train_test_split, cross_val_score, StratifiedKFold, GridSearchCV, ) from xgboost import XGBClassifier from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, classification_report, precision_recall_curve, PrecisionRecallDisplay, ) from sklearn.pipeline import Pipeline sns.set_style("whitegrid") sns.set_palette("Set2") pd.options.plotting.backend = "plotly" # ignore warnings import warnings warnings.filterwarnings("ignore") print("-" * 25) # Diabetes Detection with Machine Learning # ## Introduction # Diabetes is a chronic disease that affects millions of people worldwide. Early detection is crucial to prevent complications and manage the disease effectively. In this project, we will be building a machine learning model that can predict whether a person has diabetes based on several features such as age, gender, hypertension, heart disease, smoking history, BMI, HbA1c level, and blood glucose level. # ## Dataset # We will be using a dataset that contains information about patients, including their medical history and lab results. By analyzing this data, we can identify patterns and relationships between the features and diabetes risk and build a predictive model to classify individuals as either diabetic or non-diabetic. # ## Goals # Our ultimate goal is to create a machine learning model that accurately predicts diabetes status and can be used by healthcare professionals to identify at-risk individuals and provide early interventions. In this notebook, we will: # * Perform exploratory data analysis (EDA) to gain insights into the data and identify any data quality issues. # * Preprocess the data to prepare it for machine learning. # * Build and train a machine learning model to predict diabetes status. # * Evaluate the performance of our model using various performance metrics. # Let's get started! # ## Load the dataset # df = pd.read_csv( "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" ) # # Exploratory Data Analysis # Before we begin building our machine learning model to detect diabetes, let's perform some **exploratory data analysis (EDA)** to better understand our dataset. 
print(df.info()) # Dataset Overview # Our dataset contains information about 100,000 patients, including their medical history and lab results. The dataset has 9 columns, which are: # gender: The gender of the patient. # age: The age of the patient in years. # hypertension: Whether the patient has hypertension (1 = yes, 0 = no). # heart_disease: Whether the patient has heart disease (1 = yes, 0 = no). # smoking_history: The patient's smoking history (never, former, or current). # bmi: The patient's body mass index (BMI). # HbA1c_level: The patient's HbA1c level. # blood_glucose_level: The patient's fasting blood glucose level. # diabetes: Whether the patient has diabetes (1 = yes, 0 = no). # The data types of the columns include float64, int64, and object. # We will need to convert some of the columns to a different data type, such as converting the gender and smoking_history columns to a categorical data type. # Data Quality Issues # Before we proceed with our analysis, we need to check for any data quality issues that may affect our model's performance. These issues can include missing data, duplicate data, or outliers. # We can use pandas functions such as isnull(), duplicated(), and describe() to identify and handle these issues. We can also use visualization tools such as histograms and box plots to detect outliers and other anomalies. print(df.isnull().sum()) print(df.duplicated().sum()) print(df.describe()) # # From the first output, we can see that there are no missing values in any of the columns, since every null count is 0. # From the second output, we can see that there are some duplicate rows in the dataset, since df.duplicated().sum() returns a non-zero count. # From the third output, we can see that some columns have outliers, since the maximum value is significantly higher than the 75th percentile value. These columns are age, BMI, HbA1c_level, and blood_glucose_level. # We need to handle these issues before proceeding with the analysis. We can drop the duplicate rows and handle the outliers by either removing them or imputing them with a more reasonable value.
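# To make the observations above concrete, a short audit such as the following (an illustrative sketch, not part of the original notebook) counts the duplicate rows and compares the upper quantiles with the maximum for the columns flagged as containing outliers.
# Illustrative data-quality audit (assumes the df loaded above)
print("Duplicate rows:", df.duplicated().sum())

# Compare the 75th/99th percentiles with the maximum for the suspect columns
suspect_cols = ["age", "bmi", "HbA1c_level", "blood_glucose_level"]
print(df[suspect_cols].quantile([0.75, 0.99]).T.assign(max=df[suspect_cols].max()))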
# Perform exploratory data analysis (EDA) to gain insights into the data # Density plots # import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots # Create subplots for each variable fig = make_subplots(rows=2, cols=2) # Plot the density plot of age fig.add_trace(go.Histogram(x=df["age"], nbinsx=20, name="All Patients"), row=1, col=1) fig.add_trace( go.Histogram(x=df["age"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"), row=1, col=1, ) fig.update_xaxes(title_text="Age", row=1, col=1) fig.update_yaxes(title_text="Count", row=1, col=1) # Plot the density plot of BMI fig.add_trace(go.Histogram(x=df["bmi"], nbinsx=20, name="All Patients"), row=1, col=2) fig.add_trace( go.Histogram(x=df["bmi"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"), row=1, col=2, ) fig.update_xaxes(title_text="BMI", row=1, col=2) fig.update_yaxes(title_text="Count", row=1, col=2) # Plot the density plot of blood glucose level fig.add_trace( go.Histogram(x=df["blood_glucose_level"], nbinsx=20, name="All Patients"), row=2, col=1, ) fig.add_trace( go.Histogram( x=df["blood_glucose_level"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients", ), row=2, col=1, ) fig.update_xaxes(title_text="Blood Glucose Level", row=2, col=1) fig.update_yaxes(title_text="Count", row=2, col=1) # Plot the density plot of HbA1c level fig.add_trace( go.Histogram(x=df["HbA1c_level"], nbinsx=20, name="All Patients"), row=2, col=2 ) fig.add_trace( go.Histogram( x=df["HbA1c_level"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients" ), row=2, col=2, ) fig.update_xaxes(title_text="HbA1c Level", row=2, col=2) fig.update_yaxes(title_text="Count", row=2, col=2) fig.update_layout(title="Density Plots of Health Metrics", height=800) fig.show() # # Violin plots # # Violin plots are a method of plotting numeric data and can be considered a combination of the box plot with a kernel density plot. In the violin plot, we can find the same information as in the box plots: # median (a white dot on the violin plot) # interquartile range (the black bar in the center of violin) # the lower/upper adjacent values (the black lines stretched from the bar) — defined as first quartile — 1.5 IQR and third quartile + 1.5 IQR respectively. These values can be used in a simple outlier detection technique (Tukey’s fences) — observations lying outside of these “fences” can be considered outliers. 
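# The fences described above can be computed directly. The helper below is an illustrative sketch (not from the original notebook) that returns Tukey's fences for any numeric column; it is shown here on 'age', while the next cells apply the same rule to 'bmi'.
# Illustrative helper implementing Tukey's fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR)
def tukey_fences(series):
    q1, q3 = series.quantile([0.25, 0.75])
    iqr = q3 - q1
    return q1 - 1.5 * iqr, q3 + 1.5 * iqr

low, high = tukey_fences(df["age"])
n_outside = ((df["age"] < low) | (df["age"] > high)).sum()
print(f"age fences: [{low:.1f}, {high:.1f}], observations outside: {n_outside}")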
# import plotly.express as px fig = px.violin( df, y="age", x="gender", color="diabetes", hover_data=["bmi"], category_orders={"gender": ["Male", "Female"], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by Gender and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="hypertension", color="diabetes", hover_data=["bmi"], category_orders={"hypertension": [0, 1], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by hypertension and Diabetes Status") fig fig.show() fig = px.violin( df, y="age", x="heart_disease", color="diabetes", hover_data=["bmi"], category_orders={"heart_disease": [0, 1], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by heart_disease and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="smoking_history", color="diabetes", hover_data=["bmi"], category_orders={ "smoking_history": ["never", "former", "current"], "diabetes": ["Yes", "No"], }, ) fig.update_layout(title="Age Distribution by smoking_history and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="diabetes", hover_data=["bmi"], category_orders={"diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by Diabetes Status") fig.show() # Boxplot # import plotly.graph_objects as go fig = make_subplots(rows=1, cols=3) fig.add_trace(go.Box(y=df["bmi"], name="BMI"), row=1, col=1) fig.add_trace(go.Box(y=df["HbA1c_level"], name="HbA1c Level"), row=1, col=2) fig.add_trace( go.Box(y=df["blood_glucose_level"], name="Blood Glucose Level"), row=1, col=3 ) fig.update_layout(title="Box Plots for BMI, HbA1c Level, and Blood Glucose Level") fig.show() # # Data-preprocessing Q1 = df["bmi"].quantile(0.25) Q3 = df["bmi"].quantile(0.75) IQR = Q3 - Q1 lower_whisker = df["bmi"].where(df["bmi"] >= Q1 - 1.5 * IQR).dropna().min() upper_whisker = df["bmi"].where(df["bmi"] <= Q3 + 1.5 * IQR).dropna().max() outliers = df[(df["bmi"] < lower_whisker) | (df["bmi"] > upper_whisker)] print(outliers["bmi"]) # calculate the IQR Q1 = np.percentile(df["bmi"], 25) Q3 = np.percentile(df["bmi"], 75) IQR = Q3 - Q1 # determine the upper and lower bounds for outliers lower_bound = Q1 - 1.5 * IQR upper_bound = Q3 + 1.5 * IQR # remove the outliers df = df[(df["bmi"] >= lower_bound) & (df["bmi"] <= upper_bound)] # plot the boxplot for BMI fig = px.box(df, y="bmi") fig.update_layout(title="Box plot of BMI (without outliers)") fig.show() df["gender"] = df["gender"].astype("category") df["smoking_history"] = df["smoking_history"].astype("category") df["hypertension"] = df["hypertension"].astype(bool) df["heart_disease"] = df["heart_disease"].astype(bool) df["diabetes"] = df["diabetes"].astype(bool) # # Remove the Outliers # In this code, we are calculating the interquartile range (IQR) of the 'bmi' column, which is a measure of the spread of the data. We then determine the lower and upper bounds for outliers using the IQR, and remove any rows where the 'bmi' value is outside these bounds. # Outliers in the 'bmi' column are values that are significantly different from the majority of the data, and may indicate errors in data entry or measurement, or unusual characteristics of the individuals in the dataset. In this case, we have identified outliers with a 'bmi' value greater than 53.5, which are likely to be uncommon and potentially erroneous data points. # By removing these outliers from the dataset, we can ensure that our analysis is based on a more representative sample of the data. 
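# Dropping rows is one option; the alternative mentioned earlier, replacing extreme values with a more reasonable one, amounts to capping (winsorizing) at the same fences. The sketch below is illustrative only and works on a copy, so the notebook's own pipeline is unchanged; it targets HbA1c_level and blood_glucose_level, which were flagged as having outliers but are not treated above.
# Illustrative alternative: cap values at the IQR fences instead of dropping rows
capped = df[["HbA1c_level", "blood_glucose_level"]].copy()
for col in capped.columns:
    q1, q3 = capped[col].quantile([0.25, 0.75])
    iqr = q3 - q1
    capped[col] = capped[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)
print(capped.describe())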
# Additionally, removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions. However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed. In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset. # # drop duplicates df.drop_duplicates(inplace=True) # check for duplicates again print(df.duplicated().any()) # # Duplicates # # Removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions. # However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed. # In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset. # import plotly.graph_objects as go # count the number of people with diabetes equal to 1 and 0 diabetes_counts = df["diabetes"].value_counts() # create the pie chart fig = go.Figure( data=[ go.Pie( labels=["No Diabetes", "Diabetes"], values=diabetes_counts, hole=0.3, ) ] ) # update the layout fig.update_layout(title="Diabetes Distribution") # show the plot fig.show() # # The dataset imbalanced # # The target variable 'diabetes' in this dataset is imbalanced, with a majority of 72,480 individuals labeled as 'No Diabetes' and only 5,843 labeled as 'Diabetes'. This means that the dataset contains significantly more examples of one class than the other, which can affect the performance of machine learning algorithms and result in biased predictions. To address this issue, we may need to use techniques such as oversampling, undersampling, or synthetic data generation to balance the dataset and improve the performance of our models. # import plotly.express as px # Create heatmap figure fig = px.imshow(df.corr(), color_continuous_scale="RdBu") # Update axis labels and title fig.update_layout( xaxis_title="Features", yaxis_title="Features", title="Correlation Heatmap" ) # Show the figure fig.show() # ****Looking at the correlation matrix, we can see that the variables most strongly related to diabetes are:**** # Blood glucose level (correlation coefficient of 0.419558) # HbA1c level (correlation coefficient of 0.400660) # BMI (correlation coefficient of 0.214357) # Age (correlation coefficient of 0.258008) # Hypertension (correlation coefficient of 0.197823) # This suggests that these variables may be important predictors of diabetes and should be considered when building predictive models or analyzing the relationship between diabetes and other variables in the data. # Preprocess the data to prepare it for machine learning. # ## one-hot encoding X = df.drop("diabetes", axis=1) y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "gender"], drop_first=True) X = X.drop( [ "gender_Other", "smoking_history_not current", "smoking_history_never", "smoking_history_ever", ], axis=1, ) # Split the dataset # X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # # Build Model & Evaluate # Build and train a machine learning model to predict diabetes status. 
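# The class imbalance noted earlier could also be handled by resampling the training split,
# for example by randomly oversampling the minority (diabetic) class. The cell below is only
# a sketch of that option and is not used further; the models that follow keep the original
# split and rely on XGBoost's scale_pos_weight instead.
from sklearn.utils import resample

train_df = X_train.copy()
train_df["diabetes"] = y_train
minority = train_df[train_df["diabetes"]]
majority = train_df[~train_df["diabetes"]]
minority_upsampled = resample(
    minority, replace=True, n_samples=len(majority), random_state=42
)
balanced_train = pd.concat([majority, minority_upsampled])
print(balanced_train["diabetes"].value_counts())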
# from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import ( train_test_split, cross_val_score, StratifiedKFold, GridSearchCV, ) from xgboost import XGBClassifier from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, classification_report, precision_recall_curve, PrecisionRecallDisplay, ) from sklearn.pipeline import Pipeline model = XGBClassifier(random_state=42) model.fit(X_train, y_train) cv = 5 weights = [2, 3, 25, 50, 100] # Evaluate the performance of our model. # def report_model(model): y_train_repo = model.predict(X_train) y_test_repo = model.predict(X_test) print(f"the accuracy on train set {accuracy_score(y_train, y_train_repo)}") print(f"the accuracy on test set {accuracy_score(y_test, y_test_repo)}") print() print(classification_report(y_test, y_test_repo)) ConfusionMatrixDisplay(confusion_matrix(y_test, y_test_repo)).plot() plt.show() report_model(model) weights = [2, 3, 25, 50, 100] param_grid = dict(scale_pos_weight=weights) grid = GridSearchCV(XGBClassifier(), param_grid=param_grid, cv=cv, scoring="recall") grid.fit(X_train, y_train) print(f"best parameters: {grid.best_params_}") print(f"best scores: {grid.best_score_}") # Handle imbalanced datasets # Setting the scale_pos_weight hyperparameter to a value greater than 1 helps the algorithm to focus more on the positive class, and improves the recall (true positive rate) of the model, while possibly sacrificing some precision (positive predictive value). It is important to tune this hyperparameter carefully to avoid overfitting the positive class. # model2 = XGBClassifier( n_estimators=100, max_depth=5, scale_pos_weight=5, random_state=42 ) model2.fit(X_train, y_train) report_model(model2) import pickle # save the model to a file with open("diabetes_XGB.pkl", "wb") as f: pickle.dump(model2, f) import numpy as np # load the model from the file with open("/kaggle/working/diabetes_XGB.pkl", "rb") as f: test = pickle.load(f) def diabetes( age, hypertension, heart_disease, bmi, HbA1c_level, blood_glucose_level, smoking_history_current, smoking_history_former, gender_Male, ): classes = ["Negative", "Postive"] input_array = np.array( [ [ age, hypertension, heart_disease, bmi, HbA1c_level, blood_glucose_level, smoking_history_current, smoking_history_former, gender_Male, ] ] ) pred = test.predict(input_array) class_pred = classes[pred[0]] return class_pred
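# A purely illustrative call to the helper above; the feature values are made up and follow
# the same order used to build input_array (age, hypertension, heart_disease, bmi,
# HbA1c_level, blood_glucose_level, smoking_history_current, smoking_history_former,
# gender_Male).
example_prediction = diabetes(45, 0, 0, 27.3, 6.2, 140, 0, 1, 1)
print("Example prediction:", example_prediction)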
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/187/129187256.ipynb
diabetes-prediction-dataset
iammustafatz
[{"Id": 129187256, "ScriptId": 38311676, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12844285, "CreationDate": "05/11/2023 16:30:57", "VersionNumber": 5.0, "Title": "Detecting Diabetes EDA and XGB\ud83e\uddd0\ud83d\udcca\ud83d\udcc9", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 411.0, "LinesInsertedFromPrevious": 31.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 380.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
[{"Id": 185011574, "KernelVersionId": 129187256, "SourceDatasetVersionId": 5344155}]
[{"Id": 5344155, "DatasetId": 3102947, "DatasourceVersionId": 5417553, "CreatorUserId": 11427441, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/08/2023 06:11:45", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "Slug": "diabetes-prediction-dataset", "Subtitle": "A Comprehensive Dataset for Predicting Diabetes with Medical & Demographic Data", "Description": "The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3102947, "CreatorUserId": 11427441, "OwnerUserId": 11427441.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5344155.0, "CurrentDatasourceVersionId": 5417553.0, "ForumId": 3166206, "Type": 2, "CreationDate": "04/08/2023 06:11:45", "LastActivityDate": "04/08/2023", "TotalViews": 127619, "TotalDownloads": 24886, "TotalVotes": 309, "TotalKernels": 120}]
[{"Id": 11427441, "UserName": "iammustafatz", "DisplayName": "Mohammed Mustafa", "RegisterDate": "08/29/2022", "PerformanceTier": 0}]
import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from xgboost import XGBClassifier from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import ( train_test_split, cross_val_score, StratifiedKFold, GridSearchCV, ) from xgboost import XGBClassifier from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, classification_report, precision_recall_curve, PrecisionRecallDisplay, ) from sklearn.pipeline import Pipeline sns.set_style("whitegrid") sns.set_palette("Set2") pd.options.plotting.backend = "plotly" # ignore warnings import warnings warnings.filterwarnings("ignore") print("-" * 25) # Diabetes Detection with Machine Learning # ## Introduction # Diabetes is a chronic disease that affects millions of people worldwide. Early detection is crucial to prevent complications and manage the disease effectively. In this project, we will be building a machine learning model that can predict whether a person has diabetes based on several features such as age, gender, hypertension, heart disease, smoking history, BMI, HbA1c level, and blood glucose level. # ## Dataset # We will be using a dataset that contains information about patients, including their medical history and lab results. By analyzing this data, we can identify patterns and relationships between the features and diabetes risk and build a predictive model to classify individuals as either diabetic or non-diabetic. # ## Goals # Our ultimate goal is to create a machine learning model that accurately predicts diabetes status and can be used by healthcare professionals to identify at-risk individuals and provide early interventions. In this notebook, we will: # * Perform exploratory data analysis (EDA) to gain insights into the data and identify any data quality issues. # * Preprocess the data to prepare it for machine learning. # * Build and train a machine learning model to predict diabetes status. # * Evaluate the performance of our model using various performance metrics. # Let's get started! # ## Load the dataset # df = pd.read_csv( "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" ) # # Exploratory Data Analysis # Before we begin building our machine learning model to detect diabetes, let's perform some **exploratory data analysis (EDA)** to better understand our dataset. print(df.info()) # Dataset Overview # Our dataset contains information about 100,000 patients, including their medical history and lab results. The dataset has 9 columns, which are: # gender: The gender of the patient. # age: The age of the patient in years. # hypertension: Whether the patient has hypertension (1 = yes, 0 = no). # heart_disease: Whether the patient has heart disease (1 = yes, 0 = no). # smoking_history: The patient's smoking history (never, formerly, or currently). # bmi: The patient's body mass index (BMI). # HbA1c_level: The patient's HbA1c level. # blood_glucose_level: The patient's fasting blood glucose level. # diabetes: Whether the patient has diabetes (1 = yes, 0 = no). # The data types of the columns include float64, int64, and object. # We will need to convert some of the columns to a different data type, such as converting the gender and smoking_history columns to a categorical data type. 
# Data Quality Issues # Before we proceed with our analysis, we need to check for any data quality issues that may affect our model's performance. These issues can include missing data, duplicate data, or outliers. # We can use pandas functions such as isnull(), duplicated(), and describe() to identify and handle these issues. We can also use visualization tools such as histograms and box plots to detect outliers and other anomalies. print(df.isnull().sum()) print(df.duplicated().sum()) print(df.describe()) # # From the first output, we can see that there are no missing values in any of the columns since all values in the output are False. # From the second output, we can see that there are some duplicate rows in the dataset since some of the values are True. # From the third output, we can see that some columns have outliers since the maximum value is significantly higher than the 75th percentile value. These columns are age, BMI, HbA1c_level, and blood_glucose_level. # We need to handle these issues before proceeding with the analysis. We can drop the duplicate rows and handle the outliers by either removing them or imputing them with a more reasonable value. # Perform exploratory data analysis (EDA) to gain insights into the data # Density plots # import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots # Create subplots for each variable fig = make_subplots(rows=2, cols=2) # Plot the density plot of age fig.add_trace(go.Histogram(x=df["age"], nbinsx=20, name="All Patients"), row=1, col=1) fig.add_trace( go.Histogram(x=df["age"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"), row=1, col=1, ) fig.update_xaxes(title_text="Age", row=1, col=1) fig.update_yaxes(title_text="Count", row=1, col=1) # Plot the density plot of BMI fig.add_trace(go.Histogram(x=df["bmi"], nbinsx=20, name="All Patients"), row=1, col=2) fig.add_trace( go.Histogram(x=df["bmi"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients"), row=1, col=2, ) fig.update_xaxes(title_text="BMI", row=1, col=2) fig.update_yaxes(title_text="Count", row=1, col=2) # Plot the density plot of blood glucose level fig.add_trace( go.Histogram(x=df["blood_glucose_level"], nbinsx=20, name="All Patients"), row=2, col=1, ) fig.add_trace( go.Histogram( x=df["blood_glucose_level"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients", ), row=2, col=1, ) fig.update_xaxes(title_text="Blood Glucose Level", row=2, col=1) fig.update_yaxes(title_text="Count", row=2, col=1) # Plot the density plot of HbA1c level fig.add_trace( go.Histogram(x=df["HbA1c_level"], nbinsx=20, name="All Patients"), row=2, col=2 ) fig.add_trace( go.Histogram( x=df["HbA1c_level"][df["diabetes"] == 1], nbinsx=20, name="Diabetes Patients" ), row=2, col=2, ) fig.update_xaxes(title_text="HbA1c Level", row=2, col=2) fig.update_yaxes(title_text="Count", row=2, col=2) fig.update_layout(title="Density Plots of Health Metrics", height=800) fig.show() # # Violin plots # # Violin plots are a method of plotting numeric data and can be considered a combination of the box plot with a kernel density plot. In the violin plot, we can find the same information as in the box plots: # median (a white dot on the violin plot) # interquartile range (the black bar in the center of violin) # the lower/upper adjacent values (the black lines stretched from the bar) — defined as first quartile — 1.5 IQR and third quartile + 1.5 IQR respectively. 
These values can be used in a simple outlier detection technique (Tukey’s fences) — observations lying outside of these “fences” can be considered outliers. # import plotly.express as px fig = px.violin( df, y="age", x="gender", color="diabetes", hover_data=["bmi"], category_orders={"gender": ["Male", "Female"], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by Gender and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="hypertension", color="diabetes", hover_data=["bmi"], category_orders={"hypertension": [0, 1], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by hypertension and Diabetes Status") fig fig.show() fig = px.violin( df, y="age", x="heart_disease", color="diabetes", hover_data=["bmi"], category_orders={"heart_disease": [0, 1], "diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by heart_disease and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="smoking_history", color="diabetes", hover_data=["bmi"], category_orders={ "smoking_history": ["never", "former", "current"], "diabetes": ["Yes", "No"], }, ) fig.update_layout(title="Age Distribution by smoking_history and Diabetes Status") fig.show() fig = px.violin( df, y="age", x="diabetes", hover_data=["bmi"], category_orders={"diabetes": ["Yes", "No"]}, ) fig.update_layout(title="Age Distribution by Diabetes Status") fig.show() # Boxplot # import plotly.graph_objects as go fig = make_subplots(rows=1, cols=3) fig.add_trace(go.Box(y=df["bmi"], name="BMI"), row=1, col=1) fig.add_trace(go.Box(y=df["HbA1c_level"], name="HbA1c Level"), row=1, col=2) fig.add_trace( go.Box(y=df["blood_glucose_level"], name="Blood Glucose Level"), row=1, col=3 ) fig.update_layout(title="Box Plots for BMI, HbA1c Level, and Blood Glucose Level") fig.show() # # Data-preprocessing Q1 = df["bmi"].quantile(0.25) Q3 = df["bmi"].quantile(0.75) IQR = Q3 - Q1 lower_whisker = df["bmi"].where(df["bmi"] >= Q1 - 1.5 * IQR).dropna().min() upper_whisker = df["bmi"].where(df["bmi"] <= Q3 + 1.5 * IQR).dropna().max() outliers = df[(df["bmi"] < lower_whisker) | (df["bmi"] > upper_whisker)] print(outliers["bmi"]) # calculate the IQR Q1 = np.percentile(df["bmi"], 25) Q3 = np.percentile(df["bmi"], 75) IQR = Q3 - Q1 # determine the upper and lower bounds for outliers lower_bound = Q1 - 1.5 * IQR upper_bound = Q3 + 1.5 * IQR # remove the outliers df = df[(df["bmi"] >= lower_bound) & (df["bmi"] <= upper_bound)] # plot the boxplot for BMI fig = px.box(df, y="bmi") fig.update_layout(title="Box plot of BMI (without outliers)") fig.show() df["gender"] = df["gender"].astype("category") df["smoking_history"] = df["smoking_history"].astype("category") df["hypertension"] = df["hypertension"].astype(bool) df["heart_disease"] = df["heart_disease"].astype(bool) df["diabetes"] = df["diabetes"].astype(bool) # # Remove the Outliers # In this code, we are calculating the interquartile range (IQR) of the 'bmi' column, which is a measure of the spread of the data. We then determine the lower and upper bounds for outliers using the IQR, and remove any rows where the 'bmi' value is outside these bounds. # Outliers in the 'bmi' column are values that are significantly different from the majority of the data, and may indicate errors in data entry or measurement, or unusual characteristics of the individuals in the dataset. In this case, we have identified outliers with a 'bmi' value greater than 53.5, which are likely to be uncommon and potentially erroneous data points. 
# By removing these outliers from the dataset, we can ensure that our analysis is based on a more representative sample of the data. # Additionally, removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions. However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed. In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset. # # drop duplicates df.drop_duplicates(inplace=True) # check for duplicates again print(df.duplicated().any()) # # Duplicates # # Removing duplicates from a dataset is generally considered to be a good practice as it helps in improving the accuracy and reliability of the data. Duplicates can cause biases in the data analysis and lead to incorrect conclusions. # However, before removing duplicates, it is important to carefully examine the data and ensure that the duplicates are indeed not meaningful and should be removed. # In this case, since the duplicates have been identified and there is no reason to keep them, it is okay to remove them from the dataset. # import plotly.graph_objects as go # count the number of people with diabetes equal to 1 and 0 diabetes_counts = df["diabetes"].value_counts() # create the pie chart fig = go.Figure( data=[ go.Pie( labels=["No Diabetes", "Diabetes"], values=diabetes_counts, hole=0.3, ) ] ) # update the layout fig.update_layout(title="Diabetes Distribution") # show the plot fig.show() # # The dataset imbalanced # # The target variable 'diabetes' in this dataset is imbalanced, with a majority of 72,480 individuals labeled as 'No Diabetes' and only 5,843 labeled as 'Diabetes'. This means that the dataset contains significantly more examples of one class than the other, which can affect the performance of machine learning algorithms and result in biased predictions. To address this issue, we may need to use techniques such as oversampling, undersampling, or synthetic data generation to balance the dataset and improve the performance of our models. # import plotly.express as px # Create heatmap figure fig = px.imshow(df.corr(), color_continuous_scale="RdBu") # Update axis labels and title fig.update_layout( xaxis_title="Features", yaxis_title="Features", title="Correlation Heatmap" ) # Show the figure fig.show() # ****Looking at the correlation matrix, we can see that the variables most strongly related to diabetes are:**** # Blood glucose level (correlation coefficient of 0.419558) # HbA1c level (correlation coefficient of 0.400660) # BMI (correlation coefficient of 0.214357) # Age (correlation coefficient of 0.258008) # Hypertension (correlation coefficient of 0.197823) # This suggests that these variables may be important predictors of diabetes and should be considered when building predictive models or analyzing the relationship between diabetes and other variables in the data. # Preprocess the data to prepare it for machine learning. 
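# It is also worth quantifying the imbalance discussed above. One common rule of thumb for
# XGBoost's scale_pos_weight (which is tuned further below) is the ratio of negative to
# positive examples; this cell is only a reference calculation.
neg_count = (~df["diabetes"]).sum()
pos_count = df["diabetes"].sum()
print("negative:positive ratio ~", round(neg_count / pos_count, 2))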
# ## one-hot encoding X = df.drop("diabetes", axis=1) y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "gender"], drop_first=True) X = X.drop( [ "gender_Other", "smoking_history_not current", "smoking_history_never", "smoking_history_ever", ], axis=1, ) # Split the dataset # X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # # Build Model & Evaluate # Build and train a machine learning model to predict diabetes status. # from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import ( train_test_split, cross_val_score, StratifiedKFold, GridSearchCV, ) from xgboost import XGBClassifier from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, classification_report, precision_recall_curve, PrecisionRecallDisplay, ) from sklearn.pipeline import Pipeline model = XGBClassifier(random_state=42) model.fit(X_train, y_train) cv = 5 weights = [2, 3, 25, 50, 100] # Evaluate the performance of our model. # def report_model(model): y_train_repo = model.predict(X_train) y_test_repo = model.predict(X_test) print(f"the accuracy on train set {accuracy_score(y_train, y_train_repo)}") print(f"the accuracy on test set {accuracy_score(y_test, y_test_repo)}") print() print(classification_report(y_test, y_test_repo)) ConfusionMatrixDisplay(confusion_matrix(y_test, y_test_repo)).plot() plt.show() report_model(model) weights = [2, 3, 25, 50, 100] param_grid = dict(scale_pos_weight=weights) grid = GridSearchCV(XGBClassifier(), param_grid=param_grid, cv=cv, scoring="recall") grid.fit(X_train, y_train) print(f"best parameters: {grid.best_params_}") print(f"best scores: {grid.best_score_}") # Handle imbalanced datasets # Setting the scale_pos_weight hyperparameter to a value greater than 1 helps the algorithm to focus more on the positive class, and improves the recall (true positive rate) of the model, while possibly sacrificing some precision (positive predictive value). It is important to tune this hyperparameter carefully to avoid overfitting the positive class. # model2 = XGBClassifier( n_estimators=100, max_depth=5, scale_pos_weight=5, random_state=42 ) model2.fit(X_train, y_train) report_model(model2) import pickle # save the model to a file with open("diabetes_XGB.pkl", "wb") as f: pickle.dump(model2, f) import numpy as np # load the model from the file with open("/kaggle/working/diabetes_XGB.pkl", "rb") as f: test = pickle.load(f) def diabetes( age, hypertension, heart_disease, bmi, HbA1c_level, blood_glucose_level, smoking_history_current, smoking_history_former, gender_Male, ): classes = ["Negative", "Postive"] input_array = np.array( [ [ age, hypertension, heart_disease, bmi, HbA1c_level, blood_glucose_level, smoking_history_current, smoking_history_former, gender_Male, ] ] ) pred = test.predict(input_array) class_pred = classes[pred[0]] return class_pred
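# Because the positive class is rare, a precision-recall view is a useful complement to the
# accuracy and confusion-matrix report above. This is a short optional sketch using the
# PrecisionRecallDisplay imported earlier, applied to the reweighted model (assumes model2,
# X_test and y_test from the cells above).
PrecisionRecallDisplay.from_estimator(model2, X_test, y_test)
plt.title("Precision-Recall curve for the reweighted XGBoost model")
plt.show()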
false
1
5,183
7
5,374
5,183
129013037
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns train = pd.read_excel("/kaggle/input/products/ABBREV_with_CLASS.xlsx") train train.describe() train.isnull().sum() import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["Protein_(g)"]) Q1 = train["Protein_(g)"].quantile(0.25) Q3 = train["Protein_(g)"].quantile(0.75) IQR = Q3 - Q1 train = train[ (train["Protein_(g)"] >= Q1 - 1.5 * IQR) & (train["Protein_(g)"] <= Q3 + 1.5 * IQR) ] import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["Protein_(g)"]) train.info() from sklearn.preprocessing import LabelEncoder labelencoder_Shrt_Desc = LabelEncoder() train["Shrt_Desc"] = labelencoder_Shrt_Desc.fit_transform(train["Shrt_Desc"]) labelencoder_GmWt_Desc1 = LabelEncoder() train["GmWt_Desc1"] = labelencoder_GmWt_Desc1.fit_transform(train["GmWt_Desc1"]) labelencoder_GmWt_Desc2 = LabelEncoder() train["GmWt_Desc2"] = labelencoder_GmWt_Desc2.fit_transform(train["GmWt_Desc2"]) train.info() train.isnull().sum() train["Ash_(g)"] = train["Ash_(g)"].fillna(train["Ash_(g)"].median()) train["Water_(g)"] = train["Water_(g)"].fillna(train["Water_(g)"].median()) train["Fiber_TD_(g)"] = train["Fiber_TD_(g)"].fillna(train["Fiber_TD_(g)"].median()) train["Sugar_Tot_(g)"] = train["Sugar_Tot_(g)"].fillna(train["Sugar_Tot_(g)"].median()) train["Calcium_(mg)"] = train["Calcium_(mg)"].fillna(train["Calcium_(mg)"].median()) train["Iron_(mg)"] = train["Iron_(mg)"].fillna(train["Iron_(mg)"].median()) train["Magnesium_(mg)"] = train["Magnesium_(mg)"].fillna( train["Magnesium_(mg)"].median() ) train["Phosphorus_(mg)"] = train["Phosphorus_(mg)"].fillna( train["Phosphorus_(mg)"].median() ) train["Potassium_(mg)"] = train["Potassium_(mg)"].fillna( train["Potassium_(mg)"].median() ) train["Sodium_(mg)"] = train["Sodium_(mg)"].fillna(train["Sodium_(mg)"].median()) train["Zinc_(mg)"] = train["Zinc_(mg)"].fillna(train["Zinc_(mg)"].median()) train["Copper_mg)"] = train["Copper_mg)"].fillna(train["Copper_mg)"].median()) train["Manganese_(mg)"] = train["Manganese_(mg)"].fillna( train["Manganese_(mg)"].median() ) train["Selenium_(µg)"] = train["Selenium_(µg)"].fillna(train["Selenium_(µg)"].median()) train["Vit_C_(mg)"] = train["Vit_C_(mg)"].fillna(train["Vit_C_(mg)"].median()) train["Thiamin_(mg)"] = train["Thiamin_(mg)"].fillna(train["Thiamin_(mg)"].median()) train["Riboflavin_(mg)"] = train["Riboflavin_(mg)"].fillna( train["Riboflavin_(mg)"].median() ) train["Niacin_(mg)"] = train["Niacin_(mg)"].fillna(train["Niacin_(mg)"].median()) train["Panto_Acid_mg)"] = train["Panto_Acid_mg)"].fillna( train["Panto_Acid_mg)"].median() ) train["Vit_B6_(mg)"] = train["Vit_B6_(mg)"].fillna(train["Vit_B6_(mg)"].median()) train["Folate_Tot_(µg)"] = train["Folate_Tot_(µg)"].fillna( train["Folate_Tot_(µg)"].median() ) train["Folic_Acid_(µg)"] = train["Folic_Acid_(µg)"].fillna( train["Folic_Acid_(µg)"].median() ) train["Food_Folate_(µg)"] = train["Food_Folate_(µg)"].fillna( train["Food_Folate_(µg)"].median() ) train["Folate_DFE_(µg)"] = train["Folate_DFE_(µg)"].fillna( train["Folate_DFE_(µg)"].median() ) train["Choline_Tot_ (mg)"] = train["Choline_Tot_ (mg)"].fillna( train["Choline_Tot_ (mg)"].median() ) train["Vit_B12_(µg)"] = train["Vit_B12_(µg)"].fillna(train["Vit_B12_(µg)"].median()) train["Vit_A_IU"] = train["Vit_A_IU"].fillna(train["Vit_A_IU"].median()) train["Vit_A_RAE"] = train["Vit_A_RAE"].fillna(train["Vit_A_RAE"].median()) train["Retinol_(µg)"] = 
train["Retinol_(µg)"].fillna(train["Retinol_(µg)"].median()) train["Alpha_Carot_(µg)"] = train["Alpha_Carot_(µg)"].fillna( train["Alpha_Carot_(µg)"].median() ) train["Beta_Carot_(µg)"] = train["Beta_Carot_(µg)"].fillna( train["Beta_Carot_(µg)"].median() ) train["Beta_Crypt_(µg)"] = train["Beta_Crypt_(µg)"].fillna( train["Beta_Crypt_(µg)"].median() ) train["Lycopene_(µg)"] = train["Lycopene_(µg)"].fillna(train["Lycopene_(µg)"].median()) train["Lut+Zea_ (µg)"] = train["Lut+Zea_ (µg)"].fillna(train["Lut+Zea_ (µg)"].median()) train["Vit_E_(mg)"] = train["Vit_E_(mg)"].fillna(train["Vit_E_(mg)"].median()) train["Vit_D_µg"] = train["Vit_D_µg"].fillna(train["Vit_D_µg"].median()) train["Vit_D_IU"] = train["Vit_D_IU"].fillna(train["Vit_D_IU"].median()) train["Vit_K_(µg)"] = train["Vit_K_(µg)"].fillna(train["Vit_K_(µg)"].median()) train["FA_Sat_(g)"] = train["FA_Sat_(g)"].fillna(train["FA_Sat_(g)"].median()) train["FA_Mono_(g)"] = train["FA_Mono_(g)"].fillna(train["FA_Mono_(g)"].median()) train["FA_Poly_(g)"] = train["FA_Poly_(g)"].fillna(train["FA_Poly_(g)"].median()) train["Cholestrl_(mg)"] = train["Cholestrl_(mg)"].fillna( train["Cholestrl_(mg)"].median() ) train["GmWt_1"] = train["GmWt_1"].fillna(train["GmWt_1"].median()) train["GmWt_2"] = train["GmWt_2"].fillna(train["GmWt_2"].median()) train["Refuse_Pct"] = train["Refuse_Pct"].fillna(train["Refuse_Pct"].median()) # Метод k-средний import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # Создадим два множества точек, зрительно удаленных друг от друга. from sklearn.datasets import make_blobs centers = [[1, 1], [-1, -1]] X = train.drop("CLASS", axis=1) y = train["CLASS"] y X, y = make_blobs(n_samples=100, centers=centers, cluster_std=0.6, random_state=0) sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y) from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train, palette="viridis") sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="rocket_r") clf = KNeighborsClassifier() clf.fit(X_train, y_train) clf.score(X_test, y_test) # Визуализация DecisionBoundaryDisplay #!pip install scikit-learn --upgrade --no-deps from sklearn.inspection import DecisionBoundaryDisplay disp = DecisionBoundaryDisplay.from_estimator( clf, X_test, response_method="predict", alpha=0.7 ) disp.ax_.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor="yellow") # Визуализация mlxtend, plot_decision_regions #!pip install mlxtend --upgrade --no-deps from mlxtend.plotting import plot_decision_regions fig, ax = plt.subplots(figsize=(10, 8)) plot_decision_regions(X_test, y_test, clf=clf, legend=2) # По умолчанию классификатор берет пять ближаших соседей. Уменьшим это значение до двух и посмотрим результат. clf = KNeighborsClassifier(2) clf.fit(X_train, y_train) clf.score(X_test, y_test) fig, ax = plt.subplots(figsize=(10, 8)) plot_decision_regions(X_test, y_test, clf=clf, legend=2) # Увеличим количество множеств точек до трех. 
centers = [[1, 1], [-1, -1], [1, -1]]
X = train.drop("CLASS", axis=1)
y = train["CLASS"]
y
X, y = make_blobs(n_samples=750, centers=centers, cluster_std=0.6, random_state=0)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=53
)
sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train)
sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="tab10")
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
fig, ax = plt.subplots(figsize=(10, 8))
plot_decision_regions(X_test, y_test, clf=clf, legend=2)
scores = []
for k in range(1, 11):
    clf = KNeighborsClassifier(k)
    clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    print(k, score)
    scores.append(score)
plt.figure(figsize=(12, 3))
sns.lineplot(x=list(map(str, range(1, 11))), y=scores, marker="o", markersize=10)
# Let's look at the distance metric between points. By default the l2 metric is used, which corresponds to the Euclidean distance. Let's try switching it to l1 - the Manhattan distance.
clf = KNeighborsClassifier(metric="manhattan")
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# GridSearchCV
from sklearn.model_selection import GridSearchCV

params = {"n_neighbors": range(1, 30), "metric": ["l1", "l2"]}
best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params)
best_clf.fit(X_train, y_train)
best_clf.score(X_test, y_test)
best_clf.best_params_
from sklearn.metrics import classification_report

y_clf = clf.predict(X_test)
print(classification_report(y_test, y_clf))
y_best_clf = best_clf.predict(X_test)
print(classification_report(y_test, y_best_clf))
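# The demonstrations above all use synthetic blobs; the same classifier can also be pointed
# at the preprocessed product table prepared at the start of this notebook. This is a rough
# sketch only -- the defensive fillna and the feature scaling are assumptions added here
# because KNN is distance-based, not part of the original workflow.
from sklearn.preprocessing import StandardScaler

X_prod = train.drop("CLASS", axis=1)
X_prod = X_prod.fillna(X_prod.median())  # guard against any columns not imputed above
y_prod = train["CLASS"]
X_prod_scaled = StandardScaler().fit_transform(X_prod)
Xp_train, Xp_test, yp_train, yp_test = train_test_split(
    X_prod_scaled, y_prod, test_size=0.3, random_state=42
)
knn_prod = KNeighborsClassifier(n_neighbors=5)
knn_prod.fit(Xp_train, yp_train)
print("accuracy on the product hold-out set:", knn_prod.score(Xp_test, yp_test))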
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/013/129013037.ipynb
null
null
[{"Id": 129013037, "ScriptId": 38348516, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6692497, "CreationDate": "05/10/2023 10:04:24", "VersionNumber": 1.0, "Title": "Task3_KNN", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 213.0, "LinesInsertedFromPrevious": 213.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns train = pd.read_excel("/kaggle/input/products/ABBREV_with_CLASS.xlsx") train train.describe() train.isnull().sum() import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["Protein_(g)"]) Q1 = train["Protein_(g)"].quantile(0.25) Q3 = train["Protein_(g)"].quantile(0.75) IQR = Q3 - Q1 train = train[ (train["Protein_(g)"] >= Q1 - 1.5 * IQR) & (train["Protein_(g)"] <= Q3 + 1.5 * IQR) ] import matplotlib.pyplot as plt import numpy as np import seaborn as sns data = train sns.boxplot(x=data["Protein_(g)"]) train.info() from sklearn.preprocessing import LabelEncoder labelencoder_Shrt_Desc = LabelEncoder() train["Shrt_Desc"] = labelencoder_Shrt_Desc.fit_transform(train["Shrt_Desc"]) labelencoder_GmWt_Desc1 = LabelEncoder() train["GmWt_Desc1"] = labelencoder_GmWt_Desc1.fit_transform(train["GmWt_Desc1"]) labelencoder_GmWt_Desc2 = LabelEncoder() train["GmWt_Desc2"] = labelencoder_GmWt_Desc2.fit_transform(train["GmWt_Desc2"]) train.info() train.isnull().sum() train["Ash_(g)"] = train["Ash_(g)"].fillna(train["Ash_(g)"].median()) train["Water_(g)"] = train["Water_(g)"].fillna(train["Water_(g)"].median()) train["Fiber_TD_(g)"] = train["Fiber_TD_(g)"].fillna(train["Fiber_TD_(g)"].median()) train["Sugar_Tot_(g)"] = train["Sugar_Tot_(g)"].fillna(train["Sugar_Tot_(g)"].median()) train["Calcium_(mg)"] = train["Calcium_(mg)"].fillna(train["Calcium_(mg)"].median()) train["Iron_(mg)"] = train["Iron_(mg)"].fillna(train["Iron_(mg)"].median()) train["Magnesium_(mg)"] = train["Magnesium_(mg)"].fillna( train["Magnesium_(mg)"].median() ) train["Phosphorus_(mg)"] = train["Phosphorus_(mg)"].fillna( train["Phosphorus_(mg)"].median() ) train["Potassium_(mg)"] = train["Potassium_(mg)"].fillna( train["Potassium_(mg)"].median() ) train["Sodium_(mg)"] = train["Sodium_(mg)"].fillna(train["Sodium_(mg)"].median()) train["Zinc_(mg)"] = train["Zinc_(mg)"].fillna(train["Zinc_(mg)"].median()) train["Copper_mg)"] = train["Copper_mg)"].fillna(train["Copper_mg)"].median()) train["Manganese_(mg)"] = train["Manganese_(mg)"].fillna( train["Manganese_(mg)"].median() ) train["Selenium_(µg)"] = train["Selenium_(µg)"].fillna(train["Selenium_(µg)"].median()) train["Vit_C_(mg)"] = train["Vit_C_(mg)"].fillna(train["Vit_C_(mg)"].median()) train["Thiamin_(mg)"] = train["Thiamin_(mg)"].fillna(train["Thiamin_(mg)"].median()) train["Riboflavin_(mg)"] = train["Riboflavin_(mg)"].fillna( train["Riboflavin_(mg)"].median() ) train["Niacin_(mg)"] = train["Niacin_(mg)"].fillna(train["Niacin_(mg)"].median()) train["Panto_Acid_mg)"] = train["Panto_Acid_mg)"].fillna( train["Panto_Acid_mg)"].median() ) train["Vit_B6_(mg)"] = train["Vit_B6_(mg)"].fillna(train["Vit_B6_(mg)"].median()) train["Folate_Tot_(µg)"] = train["Folate_Tot_(µg)"].fillna( train["Folate_Tot_(µg)"].median() ) train["Folic_Acid_(µg)"] = train["Folic_Acid_(µg)"].fillna( train["Folic_Acid_(µg)"].median() ) train["Food_Folate_(µg)"] = train["Food_Folate_(µg)"].fillna( train["Food_Folate_(µg)"].median() ) train["Folate_DFE_(µg)"] = train["Folate_DFE_(µg)"].fillna( train["Folate_DFE_(µg)"].median() ) train["Choline_Tot_ (mg)"] = train["Choline_Tot_ (mg)"].fillna( train["Choline_Tot_ (mg)"].median() ) train["Vit_B12_(µg)"] = train["Vit_B12_(µg)"].fillna(train["Vit_B12_(µg)"].median()) train["Vit_A_IU"] = train["Vit_A_IU"].fillna(train["Vit_A_IU"].median()) train["Vit_A_RAE"] = train["Vit_A_RAE"].fillna(train["Vit_A_RAE"].median()) train["Retinol_(µg)"] = 
train["Retinol_(µg)"].fillna(train["Retinol_(µg)"].median()) train["Alpha_Carot_(µg)"] = train["Alpha_Carot_(µg)"].fillna( train["Alpha_Carot_(µg)"].median() ) train["Beta_Carot_(µg)"] = train["Beta_Carot_(µg)"].fillna( train["Beta_Carot_(µg)"].median() ) train["Beta_Crypt_(µg)"] = train["Beta_Crypt_(µg)"].fillna( train["Beta_Crypt_(µg)"].median() ) train["Lycopene_(µg)"] = train["Lycopene_(µg)"].fillna(train["Lycopene_(µg)"].median()) train["Lut+Zea_ (µg)"] = train["Lut+Zea_ (µg)"].fillna(train["Lut+Zea_ (µg)"].median()) train["Vit_E_(mg)"] = train["Vit_E_(mg)"].fillna(train["Vit_E_(mg)"].median()) train["Vit_D_µg"] = train["Vit_D_µg"].fillna(train["Vit_D_µg"].median()) train["Vit_D_IU"] = train["Vit_D_IU"].fillna(train["Vit_D_IU"].median()) train["Vit_K_(µg)"] = train["Vit_K_(µg)"].fillna(train["Vit_K_(µg)"].median()) train["FA_Sat_(g)"] = train["FA_Sat_(g)"].fillna(train["FA_Sat_(g)"].median()) train["FA_Mono_(g)"] = train["FA_Mono_(g)"].fillna(train["FA_Mono_(g)"].median()) train["FA_Poly_(g)"] = train["FA_Poly_(g)"].fillna(train["FA_Poly_(g)"].median()) train["Cholestrl_(mg)"] = train["Cholestrl_(mg)"].fillna( train["Cholestrl_(mg)"].median() ) train["GmWt_1"] = train["GmWt_1"].fillna(train["GmWt_1"].median()) train["GmWt_2"] = train["GmWt_2"].fillna(train["GmWt_2"].median()) train["Refuse_Pct"] = train["Refuse_Pct"].fillna(train["Refuse_Pct"].median()) # Метод k-средний import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # Создадим два множества точек, зрительно удаленных друг от друга. from sklearn.datasets import make_blobs centers = [[1, 1], [-1, -1]] X = train.drop("CLASS", axis=1) y = train["CLASS"] y X, y = make_blobs(n_samples=100, centers=centers, cluster_std=0.6, random_state=0) sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y) from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train, palette="viridis") sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="rocket_r") clf = KNeighborsClassifier() clf.fit(X_train, y_train) clf.score(X_test, y_test) # Визуализация DecisionBoundaryDisplay #!pip install scikit-learn --upgrade --no-deps from sklearn.inspection import DecisionBoundaryDisplay disp = DecisionBoundaryDisplay.from_estimator( clf, X_test, response_method="predict", alpha=0.7 ) disp.ax_.scatter(X_test[:, 0], X_test[:, 1], c=y_test, edgecolor="yellow") # Визуализация mlxtend, plot_decision_regions #!pip install mlxtend --upgrade --no-deps from mlxtend.plotting import plot_decision_regions fig, ax = plt.subplots(figsize=(10, 8)) plot_decision_regions(X_test, y_test, clf=clf, legend=2) # По умолчанию классификатор берет пять ближаших соседей. Уменьшим это значение до двух и посмотрим результат. clf = KNeighborsClassifier(2) clf.fit(X_train, y_train) clf.score(X_test, y_test) fig, ax = plt.subplots(figsize=(10, 8)) plot_decision_regions(X_test, y_test, clf=clf, legend=2) # Увеличим количество множеств точек до трех. 
centers = [[1, 1], [-1, -1], [1, -1]] X = train.drop("CLASS", axis=1) y = train["CLASS"] y X, y = make_blobs(n_samples=750, centers=centers, cluster_std=0.6, random_state=0) sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=y) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=53 ) sns.scatterplot(x=X_train[:, 0], y=X_train[:, 1], hue=y_train) sns.scatterplot(x=X_test[:, 0], y=X_test[:, 1], hue=y_test, palette="tab10") clf = KNeighborsClassifier() clf.fit(X_train, y_train) clf.score(X_test, y_test) fig, ax = plt.subplots(figsize=(10, 8)) plot_decision_regions(X_test, y_test, clf=clf, legend=2) scores = [] for k in range(1, 11): clf = KNeighborsClassifier(k) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print(k, score) scores.append(score) plt.figure(figsize=(12, 3)) sns.lineplot(x=map(str, range(1, 11)), y=scores, marker="o", markersize=10) # Рассмотрим метрики расстояния между точками. По умолчанию испольуется метрика l2, соответствующая евклидову расстоянию. Попробуем сменить ее на l1 - манхеттенское расстояние. clf = KNeighborsClassifier(metric="manhattan") clf.fit(X_train, y_train) clf.score(X_test, y_test) # GridSearchCV from sklearn.model_selection import GridSearchCV params = {"n_neighbors": range(1, 30), "metric": ["l1", "l2"]} best_clf = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=params) best_clf.fit(X_train, y_train) best_clf.score(X_test, y_test) best_clf.best_params_ from sklearn.metrics import classification_report y_clf = clf.predict(X_test) print(classification_report(y_test, y_clf)) y_best_clf = best_clf.predict(X_test) print(classification_report(y_test, y_best_clf))
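# A quick optional cross-validation check of the parameters selected by GridSearchCV above
# (cross_val_score needs its own import; X and y here are the blob data generated earlier).
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(KNeighborsClassifier(**best_clf.best_params_), X, y, cv=5)
print("mean CV accuracy with the selected parameters:", cv_scores.mean())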
false
0
3,439
0
3,439
3,439
129114823
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        pass
        # print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
with open("submission.csv", "w") as creating_new_csv_file:
    pass
import shutil

src = "/kaggle/input/quicksubmission/submission.csv"
dst = "/kaggle/working/submission.csv"
shutil.copyfile(src, dst)
# 2nd option
# shutil.copy(src, dst)  # dst can be a folder; use shutil.copy2() to preserve timestamp
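# Optional sanity check that the copied file is a readable CSV with the expected shape;
# purely illustrative and not required for the submission itself.
check = pd.read_csv(dst)
print(check.shape)
check.head()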
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114823.ipynb
null
null
[{"Id": 129114823, "ScriptId": 38383537, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4246326, "CreationDate": "05/11/2023 05:50:27", "VersionNumber": 2.0, "Title": "Simple test notebookd80b0e7c2f", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 31.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 31.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        pass
        # print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
with open("submission.csv", "w") as creating_new_csv_file:
    pass
import shutil

src = "/kaggle/input/quicksubmission/submission.csv"
dst = "/kaggle/working/submission.csv"
shutil.copyfile(src, dst)
# 2nd option
# shutil.copy(src, dst)  # dst can be a folder; use shutil.copy2() to preserve timestamp
false
0
278
0
278
278
129114567
<jupyter_start><jupyter_text>Red Wine Quality ### Context The two datasets are related to red and white variants of the Portuguese "Vinho Verde" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). These datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones). --- *This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)* ### Content For more information, read [Cortez et al., 2009].<br> Input variables (based on physicochemical tests):<br> 1 - fixed acidity <br> 2 - volatile acidity <br> 3 - citric acid <br> 4 - residual sugar <br> 5 - chlorides <br> 6 - free sulfur dioxide <br> 7 - total sulfur dioxide <br> 8 - density <br> 9 - pH <br> 10 - sulphates <br> 11 - alcohol <br> Output variable (based on sensory data): <br> 12 - quality (score between 0 and 10) <br> ### Tips What might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'. This allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value. Without doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm) **KNIME** is a great tool (GUI) that can be used for this.<br> 1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br> 2- File Reader to 'Rule Engine Node' to turn the 10 point scale to dichtome variable (good wine and rest), the code to put in the rule engine is something like this:<br> - **$quality$ > 6.5 => "good"**<br> - **TRUE => "bad"** <br> 3- Rule Engine Node output to input of Column Filter node to filter out your original 10point feature (this prevent leaking)<br> 4- Column Filter Node output to input of Partitioning Node (your standard train/tes split, e.g. 75%/25%, choose 'random' or 'stratified')<br> 5- Partitioning Node train data split output to input of Train data split to input Decision Tree Learner node and <br> 6- Partitioning Node test data split output to input Decision Tree predictor Node<br> 7- Decision Tree learner Node output to input Decision Tree Node input<br> 8- Decision Tree output to input ROC Node.. (here you can evaluate your model base on AUC value)<br> ### Inspiration Use machine learning to determine which physiochemical properties make a wine 'good'! 
Kaggle dataset identifier: red-wine-quality-cortez-et-al-2009 <jupyter_script>import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") from sklearn.cluster import KMeans from sklearn.decomposition import PCA from numpy.linalg import eig # ### Loading the dataset df = pd.read_csv("../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv") df.head() df.shape # There are 1599 rows and 12 columns df.dtypes # ### Null Values df.isnull().sum() # There are no null values noted in the dataset # ### Five-Point Summary df.describe().T sns.pairplot(data=df, diag_kind="kde") plt.show() # From the plot we can see that the numerical features in the daigonal are mostly right skewed plt.figure(figsize=(15, 7)) mask = np.triu(np.ones_like(df.corr())) sns.heatmap(df.corr(), annot=True, mask=mask) plt.show() # Only few variables have high multicolinearity density and fixed acidity-0.67, citric acid and fixed acidity-0.67. # ### Determining Outliers for i in df: sns.boxplot(df[i]) plt.show() # From the box plot it could be seen that there are huge outliers present in multiple features like residual sugar, chloride,density etc. We will use IQR method to remove the outliers Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 df = df[~((df < (Q1 - 3.5 * IQR)) | (df > (Q3 + 3.5 * IQR))).any(axis=1)] df.shape # After IQR technique the shape of the data has changed to 1452 rows and 12 columns df.skew() df.drop("quality", axis=1, inplace=True) # ### Scaling the data from sklearn.preprocessing import StandardScaler ss = StandardScaler() df_scaled = ss.fit_transform(df) df_scaled = pd.DataFrame(df, columns=df.columns) df_scaled.shape # ### K-Means wcss = [] for k in range(1, 10): kmeans = KMeans(n_clusters=k) kmeans.fit(df_scaled) wcss.append(kmeans.inertia_) print(wcss) plt.figure(figsize=(12, 6)) plt.plot(range(1, 10), wcss) plt.xlabel("Number of Clusters") plt.title("Plot for Optimal Number of Clusters") plt.ylabel("WCSS") plt.show() # From the above elbow plot we can see that the optimal value of k can be considered as 2. KMeans3 = KMeans(n_clusters=2, random_state=10) KMeans3.fit(df_scaled) labels = kmeans.predict(df_scaled) KMeans3.cluster_centers_ KMeans3.labels_ df_Kmeans = df_scaled.copy() df_Kmeans["cluster"] = KMeans3.labels_ df_Kmeans["cluster"].value_counts() sns.countplot(df_Kmeans["cluster"]) plt.title("Cluster Size") plt.xlabel("Number of Clusters") plt.ylabel("Number of Observations") plt.show() # There are 2 clusters formed and the segregation can be seen from the above plot # ### Silhouette_score from sklearn.metrics import silhouette_score c = [2, 3, 4, 5, 6] for i in c: cluster = KMeans(n_clusters=i) cluster.fit(df_scaled) score = silhouette_score(df_scaled, cluster.labels_, random_state=10) print("score", i, "", score) from yellowbrick.cluster import SilhouetteVisualizer c = [2, 3, 4, 5] for i in c: cluster = KMeans(n_clusters=i) cluster.fit(df_scaled) score = silhouette_score(df_scaled, cluster.labels_, random_state=10) print("score", i, "", score) Visualizer = SilhouetteVisualizer(cluster, colors="yellowbrick") Visualizer.fit(df_scaled) Visualizer.show() # From silhouette_score method we can see that the silhouette_score is maximum for k=2 hence K=2 is considered. 
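# To interpret the two K-Means clusters, it can help to profile them by comparing the mean
# of each feature per cluster; this is a small optional sketch using the labelled frame
# built above.
print(df_Kmeans.groupby("cluster").mean().T)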
# ### Agglomerative clustering
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist

link_mat = linkage(df_scaled, method="ward")
link_mat
c, cd = cophenet(link_mat, pdist(df_scaled))
c
# The cophenetic coefficient value is close to 1, hence it can be said that the clustering is quite good.
dendrogram(link_mat)
plt.show()
# From the above dendrogram, the optimal number of clusters obtained is 2, as two clusters remain if we cut the tree above a height of 700.
df_aggo = df_scaled.copy()
clusters = AgglomerativeClustering(n_clusters=2, linkage="ward")
clusters.fit(df_scaled)
df_aggo["cluster"] = clusters.labels_
df_aggo["cluster"].value_counts()
sns.countplot(df_aggo["cluster"])
plt.title("Cluster Size")
plt.xlabel("Number of Clusters")
plt.ylabel("Number of Observations")
plt.show()
# Through agglomerative clustering there are 2 clusters formed, and the segregation can be seen from the above plot.
# ## PCA
pca = PCA(n_components=0.95)
pca = pca.fit(df_scaled)
print("Eigen Vector :\n", pca.components_)
print()
print("Eigen Values :\n", pca.explained_variance_)
print()
print("Variance :\n", pca.explained_variance_ratio_)
# The highest variance is explained by PC1, at about 94%.
df_pca = pd.DataFrame(pca.transform(df_scaled), columns=["PC1", "PC2"])
df_pca.head()
sns.heatmap(df_pca.corr(), annot=True)
# From the above heat map we can see that there is no correlation between the components after applying PCA.
# ### Kmeans
df_kmeans = df_pca.copy()
df_kmeans["group"] = KMeans3.labels_
df_kmeans.head()
df_kmeans["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_kmeans, hue="group")
plt.show()
# ### AgglomerativeClustering
df_agg = df_pca.copy()
df_agg["group"] = clusters.labels_
df_agg.head()
df_agg["group"].value_counts()
plt.figure(figsize=(10, 6))
sns.scatterplot(x="PC1", y="PC2", data=df_agg, hue="group")
plt.show()
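# The dataset description above also suggests dichotomizing quality at 6.5 ("good" vs "bad")
# and evaluating a decision tree with ROC AUC. Below is a rough sketch of that idea; the CSV
# is re-read here because the quality column was dropped earlier, and the exact AUC quoted in
# the description is not guaranteed by this minimal version.
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

wine = pd.read_csv("../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv")
X_wine = wine.drop("quality", axis=1)
y_wine = (wine["quality"] > 6.5).astype(int)  # 1 = "good", 0 = "bad"
Xw_train, Xw_test, yw_train, yw_test = train_test_split(
    X_wine, y_wine, test_size=0.25, random_state=42, stratify=y_wine
)
tree = DecisionTreeClassifier(max_depth=4, random_state=42)
tree.fit(Xw_train, yw_train)
print("ROC AUC:", roc_auc_score(yw_test, tree.predict_proba(Xw_test)[:, 1]))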
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114567.ipynb
red-wine-quality-cortez-et-al-2009
null
[{"Id": 129114567, "ScriptId": 23893640, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8101573, "CreationDate": "05/11/2023 05:47:43", "VersionNumber": 1.0, "Title": "Red Wine Clustering", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 219.0, "LinesInsertedFromPrevious": 219.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 184883344, "KernelVersionId": 129114567, "SourceDatasetVersionId": 8204}]
[{"Id": 8204, "DatasetId": 4458, "DatasourceVersionId": 8204, "CreatorUserId": 1132983, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "11/27/2017 23:41:08", "VersionNumber": 2.0, "Title": "Red Wine Quality", "Slug": "red-wine-quality-cortez-et-al-2009", "Subtitle": "Simple and clean practice dataset for regression or classification modelling", "Description": "### Context\n\nThe two datasets are related to red and white variants of the Portuguese \"Vinho Verde\" wine. For more details, consult the reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). \n\nThese datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are much more normal wines than excellent or poor ones). \n\n---\n*This dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. (If I am mistaken and the public license type disallowed me from doing so, I will take this down if requested.)*\n\n\n### Content\n\nFor more information, read [Cortez et al., 2009].<br>\nInput variables (based on physicochemical tests):<br>\n1 - fixed acidity <br>\n2 - volatile acidity <br>\n3 - citric acid <br>\n4 - residual sugar <br>\n5 - chlorides <br>\n6 - free sulfur dioxide <br> \n7 - total sulfur dioxide <br>\n8 - density <br>\n9 - pH <br>\n10 - sulphates <br>\n11 - alcohol <br>\nOutput variable (based on sensory data): <br>\n12 - quality (score between 0 and 10) <br>\n\n### Tips\nWhat might be an interesting thing to do, is aside from using regression modelling, is to set an arbitrary cutoff for your dependent variable (wine quality) at e.g. 7 or higher getting classified as 'good/1' and the remainder as 'not good/0'.\nThis allows you to practice with hyper parameter tuning on e.g. decision tree algorithms looking at the ROC curve and the AUC value.\nWithout doing any kind of feature engineering or overfitting you should be able to get an AUC of .88 (without even using random forest algorithm)\n\n**KNIME** is a great tool (GUI) that can be used for this.<br>\n1 - File Reader (for csv) to linear correlation node and to interactive histogram for basic EDA.<br>\n2- File Reader to 'Rule Engine Node' to turn the 10 point scale to dichtome variable (good wine and rest), the code to put in the rule engine is something like this:<br>\n - **$quality$ > 6.5 => \"good\"**<br>\n - **TRUE => \"bad\"** <br>\n3- Rule Engine Node output to input of Column Filter node to filter out your original 10point feature (this prevent leaking)<br>\n4- Column Filter Node output to input of Partitioning Node (your standard train/tes split, e.g. 75%/25%, choose 'random' or 'stratified')<br>\n5- Partitioning Node train data split output to input of Train data split to input Decision Tree Learner node and <br>\n6- Partitioning Node test data split output to input Decision Tree predictor Node<br>\n7- Decision Tree learner Node output to input Decision Tree Node input<br>\n8- Decision Tree output to input ROC Node.. 
(here you can evaluate your model base on AUC value)<br>\n\n\n### Inspiration\nUse machine learning to determine which physiochemical properties make a wine 'good'!\n\n\n\n### Acknowledgements\n\nThis dataset is also available from the UCI machine learning repository, https://archive.ics.uci.edu/ml/datasets/wine+quality , I just shared it to kaggle for convenience. *(I am mistaken and the public license type disallowed me from doing so, I will take this down at first request. I am not the owner of this dataset.*\n\n**Please include this citation if you plan to use this database: \nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. \nModeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.**\n\n### Relevant publication\n\nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. \nIn Decision Support Systems, Elsevier, 47(4):547-553, 2009.", "VersionNotes": "Fixed csv format to use comma as delimiter", "TotalCompressedBytes": 100951.0, "TotalUncompressedBytes": 100951.0}]
[{"Id": 4458, "CreatorUserId": 1132983, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 8204.0, "CurrentDatasourceVersionId": 8204.0, "ForumId": 10170, "Type": 2, "CreationDate": "11/12/2017 14:08:43", "LastActivityDate": "02/06/2018", "TotalViews": 1214229, "TotalDownloads": 194418, "TotalVotes": 2537, "TotalKernels": 1574}]
null
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") from sklearn.cluster import KMeans from sklearn.decomposition import PCA from numpy.linalg import eig # ### Loading the dataset df = pd.read_csv("../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv") df.head() df.shape # There are 1599 rows and 12 columns df.dtypes # ### Null Values df.isnull().sum() # There are no null values noted in the dataset # ### Five-Point Summary df.describe().T sns.pairplot(data=df, diag_kind="kde") plt.show() # From the plot we can see that the numerical features in the daigonal are mostly right skewed plt.figure(figsize=(15, 7)) mask = np.triu(np.ones_like(df.corr())) sns.heatmap(df.corr(), annot=True, mask=mask) plt.show() # Only few variables have high multicolinearity density and fixed acidity-0.67, citric acid and fixed acidity-0.67. # ### Determining Outliers for i in df: sns.boxplot(df[i]) plt.show() # From the box plot it could be seen that there are huge outliers present in multiple features like residual sugar, chloride,density etc. We will use IQR method to remove the outliers Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 df = df[~((df < (Q1 - 3.5 * IQR)) | (df > (Q3 + 3.5 * IQR))).any(axis=1)] df.shape # After IQR technique the shape of the data has changed to 1452 rows and 12 columns df.skew() df.drop("quality", axis=1, inplace=True) # ### Scaling the data from sklearn.preprocessing import StandardScaler ss = StandardScaler() df_scaled = ss.fit_transform(df) df_scaled = pd.DataFrame(df, columns=df.columns) df_scaled.shape # ### K-Means wcss = [] for k in range(1, 10): kmeans = KMeans(n_clusters=k) kmeans.fit(df_scaled) wcss.append(kmeans.inertia_) print(wcss) plt.figure(figsize=(12, 6)) plt.plot(range(1, 10), wcss) plt.xlabel("Number of Clusters") plt.title("Plot for Optimal Number of Clusters") plt.ylabel("WCSS") plt.show() # From the above elbow plot we can see that the optimal value of k can be considered as 2. KMeans3 = KMeans(n_clusters=2, random_state=10) KMeans3.fit(df_scaled) labels = kmeans.predict(df_scaled) KMeans3.cluster_centers_ KMeans3.labels_ df_Kmeans = df_scaled.copy() df_Kmeans["cluster"] = KMeans3.labels_ df_Kmeans["cluster"].value_counts() sns.countplot(df_Kmeans["cluster"]) plt.title("Cluster Size") plt.xlabel("Number of Clusters") plt.ylabel("Number of Observations") plt.show() # There are 2 clusters formed and the segregation can be seen from the above plot # ### Silhouette_score from sklearn.metrics import silhouette_score c = [2, 3, 4, 5, 6] for i in c: cluster = KMeans(n_clusters=i) cluster.fit(df_scaled) score = silhouette_score(df_scaled, cluster.labels_, random_state=10) print("score", i, "", score) from yellowbrick.cluster import SilhouetteVisualizer c = [2, 3, 4, 5] for i in c: cluster = KMeans(n_clusters=i) cluster.fit(df_scaled) score = silhouette_score(df_scaled, cluster.labels_, random_state=10) print("score", i, "", score) Visualizer = SilhouetteVisualizer(cluster, colors="yellowbrick") Visualizer.fit(df_scaled) Visualizer.show() # From silhouette_score method we can see that the silhouette_score is maximum for k=2 hence K=2 is considered. 
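# Note: the scaling cell above assigns `df_scaled = pd.DataFrame(df, columns=df.columns)`, which wraps the
# original frame rather than the array returned by `ss.fit_transform(df)`, so the clustering effectively
# runs on unscaled features. A minimal, hedged sketch of the intended scaling step together with the
# silhouette-based choice of k used above (assumes the same `df` with the `quality` column already dropped):
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def scale_and_choose_k(features, k_range=range(2, 7), random_state=10):
    # standardize first, then score each candidate k by its silhouette coefficient
    scaled = pd.DataFrame(StandardScaler().fit_transform(features), columns=features.columns)
    scores = {}
    for k in k_range:
        km = KMeans(n_clusters=k, n_init=10, random_state=random_state).fit(scaled)
        scores[k] = silhouette_score(scaled, km.labels_)
    return scaled, max(scores, key=scores.get), scores

# Hypothetical usage: df_scaled, best_k, scores = scale_and_choose_k(df)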
# ### Agglomerative clustering from sklearn.cluster import AgglomerativeClustering from scipy.cluster.hierarchy import dendrogram from scipy.cluster.hierarchy import linkage from scipy.cluster.hierarchy import cophenet from scipy.spatial.distance import pdist link_mat = linkage(df_scaled, method="ward") link_mat c, cd = cophenet(link_mat, pdist(df_scaled)) c # Cophenetic coefficient value is close to 1, hence it can be said that the clustering is quite good. dendrogram(link_mat) plt.show() # From the above dendrogram the optimal number of clusters obtained is 2, as 2 clusters remain if we cut above 700 df_aggo = df_scaled.copy() clusters = AgglomerativeClustering(n_clusters=2, linkage="ward") clusters.fit(df_scaled) df_aggo["cluster"] = clusters.labels_ df_aggo["cluster"].value_counts() sns.countplot(df_aggo["cluster"]) plt.title("Cluster Size") plt.xlabel("Number of Clusters") plt.ylabel("Number of Observations") plt.show() # Through Agglomerative clustering there are 2 clusters formed and the segregation can be seen from the above plot. # ## PCA pca = PCA(n_components=0.95) pca = pca.fit(df_scaled) print("Eigen Vector :\n", pca.components_) print() print("Eigen Values :\n", pca.explained_variance_) print() print("Variance :\n", pca.explained_variance_ratio_) # Highest variance is explained by PC1, which is 94%. df_pca = pd.DataFrame(pca.transform(df_scaled), columns=["PC1", "PC2"]) df_pca.head() sns.heatmap(df_pca.corr(), annot=True) # From the above heat map we can see that there is no correlation after applying PCA # ### Kmeans df_kmeans = df_pca.copy() df_kmeans["group"] = KMeans3.labels_ df_kmeans.head() df_kmeans["group"].value_counts() plt.figure(figsize=(10, 6)) sns.scatterplot(x="PC1", y="PC2", data=df_kmeans, hue="group") plt.show() # ### AgglomerativeClustering df_agg = df_pca.copy() df_agg["group"] = clusters.labels_ df_agg.head() df_agg["group"].value_counts() plt.figure(figsize=(10, 6)) sns.scatterplot(x="PC1", y="PC2", data=df_agg, hue="group") plt.show()
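# The cophenetic coefficient reported above depends on the linkage method, so a quick comparison can
# support the choice of "ward". A small sketch, assuming the same `df_scaled` feature frame:
from scipy.cluster.hierarchy import linkage, cophenet
from scipy.spatial.distance import pdist

def cophenetic_by_method(X, methods=("ward", "average", "complete", "single")):
    # cophenet returns (coefficient, cophenetic distances); keep only the coefficient per method
    dists = pdist(X)
    return {m: cophenet(linkage(X, method=m), dists)[0] for m in methods}

# Hypothetical usage: cophenetic_by_method(df_scaled)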
false
0
1,836
2
2,705
1,836
129114128
data_path = "/kaggle/input/deepfake-detection-challenge/train_sample_videos" import pandas as pd data_df = pd.read_json( "/kaggle/input/deepfake-detection-challenge/train_sample_videos/metadata.json" ) data_df = data_df.T data_df df_real = data_df[data_df["label"] == "REAL"] df_real df_fake = data_df[data_df["label"] == "FAKE"] df_fake df = pd.concat([df_real.sample(n=30), df_fake.sample(n=30)]) df from tensorflow import keras model1 = keras.models.load_model("/kaggle/input/models/ncs/densenet.h5") from tensorflow.keras.applications import InceptionResNetV2 from tensorflow.keras.layers import Conv2D from tensorflow.keras.layers import MaxPooling2D from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras.layers import InputLayer from tensorflow.keras.layers import GlobalAveragePooling2D from tensorflow.keras.models import Sequential from tensorflow.keras.models import Model from tensorflow.keras import optimizers from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping base_model2 = InceptionResNetV2( include_top=False, weights="imagenet", input_shape=(128, 128, 3) ) base_model2.trainable = True model2 = Sequential() model2.add(base_model2) model2.add(GlobalAveragePooling2D()) model2.add(Dense(2, activation="softmax")) model2.summary(expand_nested=False) model2.load_weights("/kaggle/input/models/ncs/googlenet.h5") model3 = keras.models.load_model("/kaggle/input/models/ncs/densenet_sc.h5") model4 = keras.models.load_model("/kaggle/input/models/ncs/googlenet_sc.h5") import gc gc.collect() gc.collect() import dlib import cv2 from tqdm import tqdm import numpy as np from tensorflow.keras.preprocessing.image import img_to_array detector = dlib.get_frontal_face_detector() res = [] for x in tqdm(df.index): file = df.loc[x] cap = cv2.VideoCapture( "/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + x ) frameRate = cap.get(5) print(x, file[0]) r1 = [] r2 = [] r3 = [] r4 = [] while cap.isOpened(): frameId = cap.get(1) ret, frame = cap.read() if ret != True: break if frameId % ((int(frameRate) + 1) * 1) == 0: face_rects, scores, idx = detector.run(frame, 0) for i, d in enumerate(face_rects): x1 = d.left() y1 = d.top() x2 = d.right() y2 = d.bottom() crop_img = frame[y1:y2, x1:x2] data = img_to_array(cv2.resize(crop_img, (128, 128))).flatten() / 255.0 data = data.reshape(-1, 128, 128, 3) pred1 = model1.predict(data, verbose=0) pred2 = model2.predict(data, verbose=0) pred3 = model3.predict(data, verbose=0) pred4 = model4.predict(data, verbose=0) cl1 = np.argmax(pred1, axis=1)[0] cl2 = np.argmax(pred2, axis=1)[0] cl3 = np.argmax(pred3, axis=1)[0] cl4 = np.argmax(pred4, axis=1)[0] r1.append(cl1) r2.append(cl2) r3.append(cl3) r4.append(cl4) print(r1) print(r2) print(r3) print(r4) row = [x] + [file[0]] if r1 != []: if 0 in r1: fc = r1.count(0) else: fc = 0 if 1 in r1: rc = r1.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") if r2 != []: if 0 in r2: fc = r2.count(0) else: fc = 0 if 1 in r2: rc = r2.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") if r3 != []: if 0 in r3: fc = r3.count(0) else: fc = 0 if 1 in r3: rc = r3.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL") else: row += ["FAKE"] print("FAKE") if r4 != []: if 0 in r4: fc = r4.count(0) else: fc = 0 if 1 in r4: rc = r4.count(1) else: rc = 0 if rc > fc: row += ["REAL"] 
print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") res.append(row) gc.collect() print("--------------------------------------\n") res df_res = pd.DataFrame( res, columns=["file", "true", "densenet", "googlenet", "densenet_s", "googlenet_s"] ) df_res df_res = df_res.dropna(subset=["densenet"]) df_res.to_csv("result.csv", index=True) df_res["true"].value_counts() from sklearn import metrics print( "DenseNet :", metrics.accuracy_score(df_res["true"].values, df_res["densenet"].values), ) print( "GoogLeNet :", metrics.accuracy_score(df_res["true"].values, df_res["googlenet"].values), ) print( "DenseNet S :", metrics.accuracy_score(df_res["true"].values, df_res["densenet_s"].values), ) print( "GoogLeNet S :", metrics.accuracy_score(df_res["true"].values, df_res["googlenet_s"].values), ) print( "DenseNet :", metrics.recall_score( df_res["true"].values, df_res["densenet"].values, pos_label="FAKE" ), ) print( "GoogLeNet :", metrics.recall_score( df_res["true"].values, df_res["googlenet"].values, pos_label="FAKE" ), ) print( "DenseNet S :", metrics.recall_score( df_res["true"].values, df_res["densenet_s"].values, pos_label="FAKE" ), ) print( "GoogLeNet S :", metrics.recall_score( df_res["true"].values, df_res["googlenet_s"].values, pos_label="FAKE" ), ) print( "DenseNet :", metrics.recall_score( df_res["true"].values, df_res["densenet"].values, pos_label="REAL" ), ) print( "GoogLeNet :", metrics.recall_score( df_res["true"].values, df_res["googlenet"].values, pos_label="REAL" ), ) print( "DenseNet S :", metrics.recall_score( df_res["true"].values, df_res["densenet_s"].values, pos_label="REAL" ), ) print( "GoogLeNet S :", metrics.recall_score( df_res["true"].values, df_res["googlenet_s"].values, pos_label="REAL" ), ) from sklearn import metrics print( "DenseNet :\n", metrics.classification_report(df_res["true"].values, df_res["densenet"].values), "\n", ) print( "GoogLeNet :\n", metrics.classification_report(df_res["true"].values, df_res["googlenet"].values), "\n", ) print( "DenseNet S :\n", metrics.classification_report(df_res["true"].values, df_res["densenet_s"].values), "\n", ) print( "GoogLeNet S :\n", metrics.classification_report(df_res["true"].values, df_res["googlenet_s"].values), "\n", ) import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet_s"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet_s"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114128.ipynb
null
null
[{"Id": 129114128, "ScriptId": 38347181, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11268436, "CreationDate": "05/11/2023 05:42:54", "VersionNumber": 1.0, "Title": "test_dfdc", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 260.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
data_path = "/kaggle/input/deepfake-detection-challenge/train_sample_videos" import pandas as pd data_df = pd.read_json( "/kaggle/input/deepfake-detection-challenge/train_sample_videos/metadata.json" ) data_df = data_df.T data_df df_real = data_df[data_df["label"] == "REAL"] df_real df_fake = data_df[data_df["label"] == "FAKE"] df_fake df = pd.concat([df_real.sample(n=30), df_fake.sample(n=30)]) df from tensorflow import keras model1 = keras.models.load_model("/kaggle/input/models/ncs/densenet.h5") from tensorflow.keras.applications import InceptionResNetV2 from tensorflow.keras.layers import Conv2D from tensorflow.keras.layers import MaxPooling2D from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras.layers import InputLayer from tensorflow.keras.layers import GlobalAveragePooling2D from tensorflow.keras.models import Sequential from tensorflow.keras.models import Model from tensorflow.keras import optimizers from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping base_model2 = InceptionResNetV2( include_top=False, weights="imagenet", input_shape=(128, 128, 3) ) base_model2.trainable = True model2 = Sequential() model2.add(base_model2) model2.add(GlobalAveragePooling2D()) model2.add(Dense(2, activation="softmax")) model2.summary(expand_nested=False) model2.load_weights("/kaggle/input/models/ncs/googlenet.h5") model3 = keras.models.load_model("/kaggle/input/models/ncs/densenet_sc.h5") model4 = keras.models.load_model("/kaggle/input/models/ncs/googlenet_sc.h5") import gc gc.collect() gc.collect() import dlib import cv2 from tqdm import tqdm import numpy as np from tensorflow.keras.preprocessing.image import img_to_array detector = dlib.get_frontal_face_detector() res = [] for x in tqdm(df.index): file = df.loc[x] cap = cv2.VideoCapture( "/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + x ) frameRate = cap.get(5) print(x, file[0]) r1 = [] r2 = [] r3 = [] r4 = [] while cap.isOpened(): frameId = cap.get(1) ret, frame = cap.read() if ret != True: break if frameId % ((int(frameRate) + 1) * 1) == 0: face_rects, scores, idx = detector.run(frame, 0) for i, d in enumerate(face_rects): x1 = d.left() y1 = d.top() x2 = d.right() y2 = d.bottom() crop_img = frame[y1:y2, x1:x2] data = img_to_array(cv2.resize(crop_img, (128, 128))).flatten() / 255.0 data = data.reshape(-1, 128, 128, 3) pred1 = model1.predict(data, verbose=0) pred2 = model2.predict(data, verbose=0) pred3 = model3.predict(data, verbose=0) pred4 = model4.predict(data, verbose=0) cl1 = np.argmax(pred1, axis=1)[0] cl2 = np.argmax(pred2, axis=1)[0] cl3 = np.argmax(pred3, axis=1)[0] cl4 = np.argmax(pred4, axis=1)[0] r1.append(cl1) r2.append(cl2) r3.append(cl3) r4.append(cl4) print(r1) print(r2) print(r3) print(r4) row = [x] + [file[0]] if r1 != []: if 0 in r1: fc = r1.count(0) else: fc = 0 if 1 in r1: rc = r1.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") if r2 != []: if 0 in r2: fc = r2.count(0) else: fc = 0 if 1 in r2: rc = r2.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") if r3 != []: if 0 in r3: fc = r3.count(0) else: fc = 0 if 1 in r3: rc = r3.count(1) else: rc = 0 if rc > fc: row += ["REAL"] print("REAL") else: row += ["FAKE"] print("FAKE") if r4 != []: if 0 in r4: fc = r4.count(0) else: fc = 0 if 1 in r4: rc = r4.count(1) else: rc = 0 if rc > fc: row += ["REAL"] 
print("REAL", end="\t") else: row += ["FAKE"] print("FAKE", end="\t") res.append(row) gc.collect() print("--------------------------------------\n") res df_res = pd.DataFrame( res, columns=["file", "true", "densenet", "googlenet", "densenet_s", "googlenet_s"] ) df_res df_res = df_res.dropna(subset=["densenet"]) df_res.to_csv("result.csv", index=True) df_res["true"].value_counts() from sklearn import metrics print( "DenseNet :", metrics.accuracy_score(df_res["true"].values, df_res["densenet"].values), ) print( "GoogLeNet :", metrics.accuracy_score(df_res["true"].values, df_res["googlenet"].values), ) print( "DenseNet S :", metrics.accuracy_score(df_res["true"].values, df_res["densenet_s"].values), ) print( "GoogLeNet S :", metrics.accuracy_score(df_res["true"].values, df_res["googlenet_s"].values), ) print( "DenseNet :", metrics.recall_score( df_res["true"].values, df_res["densenet"].values, pos_label="FAKE" ), ) print( "GoogLeNet :", metrics.recall_score( df_res["true"].values, df_res["googlenet"].values, pos_label="FAKE" ), ) print( "DenseNet S :", metrics.recall_score( df_res["true"].values, df_res["densenet_s"].values, pos_label="FAKE" ), ) print( "GoogLeNet S :", metrics.recall_score( df_res["true"].values, df_res["googlenet_s"].values, pos_label="FAKE" ), ) print( "DenseNet :", metrics.recall_score( df_res["true"].values, df_res["densenet"].values, pos_label="REAL" ), ) print( "GoogLeNet :", metrics.recall_score( df_res["true"].values, df_res["googlenet"].values, pos_label="REAL" ), ) print( "DenseNet S :", metrics.recall_score( df_res["true"].values, df_res["densenet_s"].values, pos_label="REAL" ), ) print( "GoogLeNet S :", metrics.recall_score( df_res["true"].values, df_res["googlenet_s"].values, pos_label="REAL" ), ) from sklearn import metrics print( "DenseNet :\n", metrics.classification_report(df_res["true"].values, df_res["densenet"].values), "\n", ) print( "GoogLeNet :\n", metrics.classification_report(df_res["true"].values, df_res["googlenet"].values), "\n", ) print( "DenseNet S :\n", metrics.classification_report(df_res["true"].values, df_res["densenet_s"].values), "\n", ) print( "GoogLeNet S :\n", metrics.classification_report(df_res["true"].values, df_res["googlenet_s"].values), "\n", ) import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["densenet_s"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show() import os import matplotlib.pyplot as plt cm = metrics.confusion_matrix(df_res["true"].values, df_res["googlenet_s"].values) cm_display = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=["FAKE", "REAL"] ) fig, ax = plt.subplots(figsize=(10, 10)) cm_display.plot(ax=ax) plt.show()
false
0
2,716
0
2,716
2,716
129114544
<jupyter_start><jupyter_text>IMDB Movies Dataset ### Context IMDB Dataset of top 1000 movies and tv shows. You can find the **EDA Process** on - https://www.kaggle.com/harshitshankhdhar/eda-on-imdb-movies-dataset Please consider **UPVOTE** if you found it useful. ### Content Data:- - **Poster_Link** - Link of the poster that imdb using - **Series_Title** = Name of the movie - **Released_Year** - Year at which that movie released - **Certificate** - Certificate earned by that movie - **Runtime** - Total runtime of the movie - **Genre** - Genre of the movie - **IMDB_Rating** - Rating of the movie at IMDB site - **Overview** - mini story/ summary - **Meta_score** - Score earned by the movie - **Director** - Name of the Director - **Star1,Star2,Star3,Star4** - Name of the Stars - **No_of_votes** - Total number of votes - **Gross** - Money earned by that movie ### Inspiration - Analysis of the gross of a movie vs directors. - Analysis of the gross of a movie vs different - different stars. - Analysis of the No_of_votes of a movie vs directors. - Analysis of the No_of_votes of a movie vs different - different stars. - Which actor prefer which Genre more? - Which combination of actors are getting good IMDB_Rating maximum time? - Which combination of actors are getting good gross? Kaggle dataset identifier: imdb-dataset-of-top-1000-movies-and-tv-shows <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Libraries import pandas as pd import numpy as np # # Load Data df = pd.read_csv( "/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv" ) # # Preprocess Data # Grab necessary columns columns = df.columns[ ~df.columns.isin(["Poster_Link", "Released_Year", "Certificate", "Meta_score"]) ] df = df[columns] # Transform 'Runtime' from object to int df["Runtime"] = df["Runtime"].apply(lambda x: x.split(" ")[0]) df["IMDB_Rating"] = df["IMDB_Rating"].astype(str) df["Genre"] = df["Genre"].str.lower() # Transform 'Gross' to 'Gross' in million. 
Fill Na value with mean value def transform_gross(gross): if gross is np.nan: return gross if isinstance(gross, str): gross = float(gross.replace(",", "")) return gross / 10**8 df["Gross"] = df["Gross"].apply(transform_gross) df["Gross"] = df["Gross"].fillna(df["Gross"].mean()) import nltk nltk.download("stopwords") from string import punctuation from nltk.corpus import stopwords from nltk.tokenize import word_tokenize punctuation = list(punctuation) stopwords = stopwords.words("english") def preprocess_text(sentence): list_words = [] tokens = word_tokenize(sentence) return " ".join( [t.lower() for t in tokens if t not in stopwords and t not in punctuation] ) df["Overview"] = df["Overview"].apply(preprocess_text) # # Join text from columns that contain movie information names = df.columns[~df.columns.isin(["No_of_Votes", "Gross"])] df["text"] = df[names].agg(" ".join, axis=1) df["text"] = df["text"] + " " + df["Director"] + " " + df["Director"] # # Load pre-trained model Word2Vec # - I have trained Word2Vec model from IMDB Data, but the result is not as good as the pre-trained model from gensim.models import Word2Vec vec_size = 300 df["tokenized_text"] = df["text"].apply(word_tokenize) # # Initialize the Word2Vec model (without training) # model = Word2Vec(vector_size=vec_size, window=10, min_count=1, workers=2) # # Build the vocabulary # model.build_vocab(df['tokenized_text']) # # Train the model # model.train(df['tokenized_text'], total_examples=model.corpus_count, epochs=30) import gensim.downloader as api model = api.load("word2vec-google-news-300") def get_avg_feature_vec(sentence, vec_size, model, vocab): vec = np.zeros(vec_size) tt_word = 0 for w in sentence: if w in vocab: vec += model[w] tt_word += 1 if tt_word: vec /= tt_word return vec vocabulary = set(model.index_to_key) feature_matrix = [ get_avg_feature_vec(s, vec_size, model, vocabulary) for s in df["tokenized_text"] ] # # Get recommendation # - Assume that I have watched 'The Dark Knight' # - Use cosine-similarity to find related movies movie_index = df[df["Series_Title"] == "The Dark Knight"].index[0] movie_index from sklearn.metrics.pairwise import cosine_similarity # Compute the cosine similarities between the user movie and all other movies user_movie_vector = feature_matrix[movie_index].reshape(1, -1) similarity_scores = cosine_similarity(user_movie_vector, feature_matrix) similar_movies = list(enumerate(similarity_scores[0])) # Get the top 10 most similar movies sorted_similar_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:20] # Print the top 10 similar movies for i, score in sorted_similar_movies: print("{}: {}".format(i, df.loc[i, "Series_Title"])) # ## Apply Weight_Rating for recommendation, this take into account movies that higher rating def cal_weighted_rating(movie_indices): weighted_movie_indices = dict() m = df["No_of_Votes"].quantile(0.75) C = df["IMDB_Rating"].astype(float).mean() for movie_index in movie_indices: if df.loc[movie_index, "No_of_Votes"] > m: v = df.loc[movie_index, "No_of_Votes"] R = float(df.loc[movie_index, "IMDB_Rating"]) weighted_movie_indices[movie_index] = R * v / (v + m) + C * m / (v + m) return sorted(weighted_movie_indices.items(), key=lambda x: x[1], reverse=True) top_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:100] indices = cal_weighted_rating([i[0] for i in top_movies])[0:20] for i, score in indices: print("{}: {}".format(i, df.loc[i, "Series_Title"]))
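# The re-ranking step above is the IMDB-style weighted rating
#   WR = v / (v + m) * R + m / (v + m) * C
# with v = vote count, R = the title's rating, m = a vote-count cutoff (the 75th percentile here) and
# C = the mean rating. The formula on its own, as a sketch that assumes this notebook's column names:
def weighted_rating(votes, rating, m, C):
    return rating * votes / (votes + m) + C * m / (votes + m)

# Hypothetical usage, matching the notebook's choices of m and C:
# m = df["No_of_Votes"].quantile(0.75)
# C = df["IMDB_Rating"].astype(float).mean()
# score = weighted_rating(df.loc[i, "No_of_Votes"], float(df.loc[i, "IMDB_Rating"]), m, C)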
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/114/129114544.ipynb
imdb-dataset-of-top-1000-movies-and-tv-shows
harshitshankhdhar
[{"Id": 129114544, "ScriptId": 36803103, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11527547, "CreationDate": "05/11/2023 05:47:27", "VersionNumber": 1.0, "Title": "Movie Recommender", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 152.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 184883315, "KernelVersionId": 129114544, "SourceDatasetVersionId": 1898721}]
[{"Id": 1898721, "DatasetId": 1131493, "DatasourceVersionId": 1937036, "CreatorUserId": 5138057, "LicenseName": "CC0: Public Domain", "CreationDate": "02/01/2021 07:35:48", "VersionNumber": 1.0, "Title": "IMDB Movies Dataset", "Slug": "imdb-dataset-of-top-1000-movies-and-tv-shows", "Subtitle": "Top 1000 Movies by IMDB Rating", "Description": "### Context\n\nIMDB Dataset of top 1000 movies and tv shows.\nYou can find the **EDA Process** on - https://www.kaggle.com/harshitshankhdhar/eda-on-imdb-movies-dataset\n\nPlease consider **UPVOTE** if you found it useful. \n\n### Content\n\nData:-\n- **Poster_Link** - Link of the poster that imdb using\n- **Series_Title** = Name of the movie \n- **Released_Year** - Year at which that movie released\n- **Certificate** - Certificate earned by that movie\n- **Runtime** - Total runtime of the movie\n- **Genre** - Genre of the movie\n- **IMDB_Rating** - Rating of the movie at IMDB site\n- **Overview** - mini story/ summary\n- **Meta_score** - Score earned by the movie\n- **Director** - Name of the Director\n- **Star1,Star2,Star3,Star4** - Name of the Stars\n- **No_of_votes** - Total number of votes\n- **Gross** - Money earned by that movie\n\n### Inspiration\n\n- Analysis of the gross of a movie vs directors.\n- Analysis of the gross of a movie vs different - different stars.\n- Analysis of the No_of_votes of a movie vs directors.\n- Analysis of the No_of_votes of a movie vs different - different stars.\n- Which actor prefer which Genre more?\n- Which combination of actors are getting good IMDB_Rating maximum time?\n- Which combination of actors are getting good gross?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1131493, "CreatorUserId": 5138057, "OwnerUserId": 5138057.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1898721.0, "CurrentDatasourceVersionId": 1937036.0, "ForumId": 1148924, "Type": 2, "CreationDate": "02/01/2021 07:35:48", "LastActivityDate": "02/01/2021", "TotalViews": 179357, "TotalDownloads": 32394, "TotalVotes": 329, "TotalKernels": 92}]
[{"Id": 5138057, "UserName": "harshitshankhdhar", "DisplayName": "Harshit Shankhdhar", "RegisterDate": "05/21/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Libraries import pandas as pd import numpy as np # # Load Data df = pd.read_csv( "/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv" ) # # Preprocess Data # Grab necessary columns columns = df.columns[ ~df.columns.isin(["Poster_Link", "Released_Year", "Certificate", "Meta_score"]) ] df = df[columns] # Transform 'Runtime' from object to int df["Runtime"] = df["Runtime"].apply(lambda x: x.split(" ")[0]) df["IMDB_Rating"] = df["IMDB_Rating"].astype(str) df["Genre"] = df["Genre"].str.lower() # Transform 'Gross' to 'Gross' in million. Fill Na value with mean value def transform_gross(gross): if gross is np.nan: return gross if isinstance(gross, str): gross = float(gross.replace(",", "")) return gross / 10**8 df["Gross"] = df["Gross"].apply(transform_gross) df["Gross"] = df["Gross"].fillna(df["Gross"].mean()) import nltk nltk.download("stopwords") from string import punctuation from nltk.corpus import stopwords from nltk.tokenize import word_tokenize punctuation = list(punctuation) stopwords = stopwords.words("english") def preprocess_text(sentence): list_words = [] tokens = word_tokenize(sentence) return " ".join( [t.lower() for t in tokens if t not in stopwords and t not in punctuation] ) df["Overview"] = df["Overview"].apply(preprocess_text) # # Join text from columns that contain movie information names = df.columns[~df.columns.isin(["No_of_Votes", "Gross"])] df["text"] = df[names].agg(" ".join, axis=1) df["text"] = df["text"] + " " + df["Director"] + " " + df["Director"] # # Load pre-trained model Word2Vec # - I have trained Word2Vec model from IMDB Data, but the result is not as good as the pre-trained model from gensim.models import Word2Vec vec_size = 300 df["tokenized_text"] = df["text"].apply(word_tokenize) # # Initialize the Word2Vec model (without training) # model = Word2Vec(vector_size=vec_size, window=10, min_count=1, workers=2) # # Build the vocabulary # model.build_vocab(df['tokenized_text']) # # Train the model # model.train(df['tokenized_text'], total_examples=model.corpus_count, epochs=30) import gensim.downloader as api model = api.load("word2vec-google-news-300") def get_avg_feature_vec(sentence, vec_size, model, vocab): vec = np.zeros(vec_size) tt_word = 0 for w in sentence: if w in vocab: vec += model[w] tt_word += 1 if tt_word: vec /= tt_word return vec vocabulary = set(model.index_to_key) feature_matrix = [ get_avg_feature_vec(s, vec_size, model, vocabulary) for s in df["tokenized_text"] ] # # Get recommendation # - Assume that I have watched 'The Dark Knight' # - Use cosine-similarity to find related movies movie_index = df[df["Series_Title"] == "The Dark Knight"].index[0] movie_index from sklearn.metrics.pairwise import cosine_similarity # Compute the cosine similarities between the user movie and all other movies user_movie_vector = 
feature_matrix[movie_index].reshape(1, -1) similarity_scores = cosine_similarity(user_movie_vector, feature_matrix) similar_movies = list(enumerate(similarity_scores[0])) # Get the top 10 most similar movies sorted_similar_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:20] # Print the top 10 similar movies for i, score in sorted_similar_movies: print("{}: {}".format(i, df.loc[i, "Series_Title"])) # ## Apply Weight_Rating for recommendation, this take into account movies that higher rating def cal_weighted_rating(movie_indices): weighted_movie_indices = dict() m = df["No_of_Votes"].quantile(0.75) C = df["IMDB_Rating"].astype(float).mean() for movie_index in movie_indices: if df.loc[movie_index, "No_of_Votes"] > m: v = df.loc[movie_index, "No_of_Votes"] R = float(df.loc[movie_index, "IMDB_Rating"]) weighted_movie_indices[movie_index] = R * v / (v + m) + C * m / (v + m) return sorted(weighted_movie_indices.items(), key=lambda x: x[1], reverse=True) top_movies = sorted(similar_movies, key=lambda x: x[1], reverse=True)[1:100] indices = cal_weighted_rating([i[0] for i in top_movies])[0:20] for i, score in indices: print("{}: {}".format(i, df.loc[i, "Series_Title"]))
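# The retrieval above ranks titles by cosine similarity between mean word2vec vectors. A compact sketch
# of that lookup, assuming the `df` and `feature_matrix` built in this notebook:
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def most_similar_titles(title, df, feature_matrix, top_n=10):
    # locate the query title, compare its averaged vector against every other title's vector
    idx = df[df["Series_Title"] == title].index[0]
    sims = cosine_similarity(np.asarray(feature_matrix[idx]).reshape(1, -1),
                             np.asarray(feature_matrix))[0]
    order = [i for i in np.argsort(sims)[::-1] if i != idx][:top_n]
    return df.loc[order, "Series_Title"].tolist()

# Hypothetical usage: most_similar_titles("The Dark Knight", df, feature_matrix)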
false
1
1,552
2
1,965
1,552
129080229
<jupyter_start><jupyter_text>Video Game Sales This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1]. Fields include * Rank - Ranking of overall sales * Name - The games name * Platform - Platform of the games release (i.e. PC,PS4, etc.) * Year - Year of the game's release * Genre - Genre of the game * Publisher - Publisher of the game * NA_Sales - Sales in North America (in millions) * EU_Sales - Sales in Europe (in millions) * JP_Sales - Sales in Japan (in millions) * Other_Sales - Sales in the rest of the world (in millions) * Global_Sales - Total worldwide sales. The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape. It is based on BeautifulSoup using Python. There are 16,598 records. 2 records were dropped due to incomplete information. [1]: http://www.vgchartz.com/ Kaggle dataset identifier: videogamesales <jupyter_code>import pandas as pd df = pd.read_csv('videogamesales/vgsales.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <jupyter_text>Examples: { "Rank": 1, "Name": "Wii Sports", "Platform": "Wii", "Year": 2006, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 41.49, "EU_Sales": 29.02, "JP_Sales": 3.77, "Other_Sales": 8.46, "Global_Sales": 82.74 } { "Rank": 2, "Name": "Super Mario Bros.", "Platform": "NES", "Year": 1985, "Genre": "Platform", "Publisher": "Nintendo", "NA_Sales": 29.08, "EU_Sales": 3.58, "JP_Sales": 6.8100000000000005, "Other_Sales": 0.77, "Global_Sales": 40.24 } { "Rank": 3, "Name": "Mario Kart Wii", "Platform": "Wii", "Year": 2008, "Genre": "Racing", "Publisher": "Nintendo", "NA_Sales": 15.85, "EU_Sales": 12.88, "JP_Sales": 3.79, "Other_Sales": 3.31, "Global_Sales": 35.82 } { "Rank": 4, "Name": "Wii Sports Resort", "Platform": "Wii", "Year": 2009, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 15.75, "EU_Sales": 11.01, "JP_Sales": 3.2800000000000002, "Other_Sales": 2.96, "Global_Sales": 33.0 } <jupyter_script># # VG-stats # ## Abdulkareem Abunabhan # ### 10/5/2023 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") # #### Q1-Which company is the most common video game publisher? 
publisher_counts = df.groupby("Publisher")["Rank"].count() most_common_publisher = publisher_counts.idxmax() print("The most common video game publisher is:", most_common_publisher) # #### Q2-What’s the most common platform? platform_counts = df.groupby("Platform")["Rank"].count() most_common_platform = platform_counts.idxmax() print("The most common video game platform is", most_common_platform) # #### Q3-What about the most common genre? genre_counts = df.groupby("Genre")["Rank"].count() most_common_genre = genre_counts.idxmax() print("The most common video game genre is", most_common_genre) # #### Q4-What are the top 20 highest grossing games? # df["Total_sales"] = df["NA_Sales"] + df["EU_Sales"] + df["JP_Sales"] + df["Other_Sales"] df = df.sort_values("Total_sales", ascending=False) top_20_games = df.head(20) print(top_20_games[["Name", "Platform", "Publisher", "Total_sales"]]) # #### Q5-For North American video game sales, what’s the median? # na_median = df["NA_Sales"].median() print("na_median is:", na_median) # ##### Provide a secondary output showing ten games surrounding the median sales output. # ##### Assume that games with the same median value are sorted in descending order. surrounding_games = df.loc[ (df["NA_Sales"] >= na_median - 0.05) & (df["NA_Sales"] <= na_median + 0.05) ] surrounding_games = surrounding_games.sort_values(by="NA_Sales", ascending=False) print("The ten games surrounding the median sales are:") print(surrounding_games[["Name", "Platform", "Publisher", "NA_Sales"]].head(10)) # #### Q6-For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? na_mean = df["NA_Sales"].mean() na_std = df["NA_Sales"].std() top_game = df.loc[df["Rank"] == 1] deviations = (top_game["NA_Sales"] - na_mean) / na_std print(deviations) # #### Q7-The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? platform_sales = df.groupby("Platform")["Total_sales"].mean() wii_sales = platform_sales.loc["Wii"] other_sales = platform_sales.loc[platform_sales.index != "Wii"].mean() if wii_sales > other_sales: print( "The Nintendo Wii has a higher average number of sales than all other platforms." ) else: print( "The Nintendo Wii has a lower average number of sales than all other platforms." )
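# The +/-0.05 window above is a heuristic, so the ten printed rows depend on that tolerance (many titles
# share the same NA_Sales value). A sketch of a positional alternative that takes the five titles on
# either side of the median row once the frame is sorted by NA_Sales:
def games_around_median(frame, n=10):
    ordered = frame.sort_values("NA_Sales", ascending=False).reset_index(drop=True)
    mid, half = len(ordered) // 2, n // 2
    return ordered.loc[mid - half : mid + half - 1, ["Name", "Platform", "Publisher", "NA_Sales"]]

# Hypothetical usage: print(games_around_median(df))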
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080229.ipynb
videogamesales
gregorut
[{"Id": 129080229, "ScriptId": 38370622, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15002387, "CreationDate": "05/10/2023 20:49:21", "VersionNumber": 1.0, "Title": "Vg-stats", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 89.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184816925, "KernelVersionId": 129080229, "SourceDatasetVersionId": 618}]
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
# # VG-stats # ## Abdulkareem Abunabhan # ### 10/5/2023 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") # #### Q1-Which company is the most common video game publisher? publisher_counts = df.groupby("Publisher")["Rank"].count() most_common_publisher = publisher_counts.idxmax() print("The most common video game publisher is:", most_common_publisher) # #### Q2-What’s the most common platform? platform_counts = df.groupby("Platform")["Rank"].count() most_common_platform = platform_counts.idxmax() print("The most common video game platform is", most_common_platform) # #### Q3-What about the most common genre? genre_counts = df.groupby("Genre")["Rank"].count() most_common_genre = genre_counts.idxmax() print("The most common video game genre is", most_common_genre) # #### Q4-What are the top 20 highest grossing games? # df["Total_sales"] = df["NA_Sales"] + df["EU_Sales"] + df["JP_Sales"] + df["Other_Sales"] df = df.sort_values("Total_sales", ascending=False) top_20_games = df.head(21) print(top_20_games[["Name", "Platform", "Publisher", "Total_sales"]]) # #### Q5-For North American video game sales, what’s the median? # na_median = df["NA_Sales"].median() print("na_median is:", na_median) # ##### Provide a secondary output showing ten games surrounding the median sales output. # ##### Assume that games with same median value are sorted in descending order. surrounding_games = df.loc[ (df["NA_Sales"] >= na_median - 0.05) & (df["NA_Sales"] <= na_median + 0.05) ] surrounding_games = surrounding_games.sort_values(by="NA_Sales", ascending=False) print("The ten games surrounding the median sales are:") print(surrounding_games[["Name", "Platform", "Publisher", "NA_Sales"]].head(10)) # #### Q6-For the top-selling game of all time, how many standard deviations above/below the mean are its sales for North America? na_mean = df["NA_Sales"].mean() na_std = df["NA_Sales"].std() top_game = df.loc[df["Rank"] == 1] deviations = (top_game["NA_Sales"] - na_mean) / na_std print(deviations) # #### Q7-The Nintendo Wii seems to have outdone itself with games. How does its average number of sales compare with all of the other platforms? platform_sales = df.groupby("Platform")["Total_sales"].mean() wii_sales = platform_sales.loc["Wii"] other_sales = platform_sales.loc[platform_sales.index != "Wii"].mean() if wii_sales > other_sales: print( "The Nintendo Wii has a higher average number of sales than all other platforms." ) else: print( "The Nintendo Wii has a lower average number of sales than all other platforms." )
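# Q6 above expresses the top seller's North American sales as a z-score, (x - mean) / std. The same
# computation for any single title, as a sketch that assumes the vgsales frame `df`:
def na_sales_zscore(frame, name):
    mu, sigma = frame["NA_Sales"].mean(), frame["NA_Sales"].std()
    x = frame.loc[frame["Name"] == name, "NA_Sales"].iloc[0]
    return (x - mu) / sigma

# Hypothetical usage: na_sales_zscore(df, "Wii Sports")  # roughly 50 given the summary statistics shown in this record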
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
true
1
<start_data_description><data_path>videogamesales/vgsales.csv: <column_names> ['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales'] <column_types> {'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'} <dataframe_Summary> {'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}} <dataframe_info> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <some_examples> {'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}} <end_description>
996
0
2,109
996
129080111
# # Import import pandas as pd import numpy as np import torch import torchaudio import math, random from IPython.display import Audio import librosa from tqdm import tqdm import warnings from torch.utils.data import Dataset, DataLoader import os import matplotlib.pyplot as plt from torchvision.transforms import Resize from sklearn.model_selection import train_test_split import torchvision.models as models import torch.nn as nn from timeit import default_timer as timer from sklearn import preprocessing from sklearn.metrics import ( f1_score, recall_score, confusion_matrix, classification_report, ) # disable all warning messages warnings.filterwarnings("ignore") train_on_gpu = True def seed_everything(seed: int): import random, os import numpy as np import torch random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True seed_everything(42) config = { "sample_rate": 22050, "clip_length": 5, "classes_number": 264, "learning_rate": 0.001, "batch_size": 32, "num_epochs": 10, } # # Create Train DataFrame # Because the audio dosn't have the same legnth we will create new dataframe contains information about the length of each Audio, # and then Create dataframe the contains the start and the end of each new audio which will be the input to the AI pipline # Original_Df = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv") # for i, audio_name in tqdm(enumerate(Original_Df.filename)): # audio_data, Audio_sample_rate = librosa.load(f"/kaggle/input/birdclef-2023/train_audio/{audio_name}") # audio_duration = librosa.get_duration(y=audio_data, sr=Audio_sample_rate) # Original_Df.loc[i, "duration"] = audio_duration # Original_Df.to_csv("train_metadata_with_length.csv", index =False) # ## Split each audio file into equil lengths DF Df_length = pd.read_csv("/kaggle/working/train_metadata_with_length.csv") def Create_from_end(Df_length, clip_length=5, overlap_length=0): # Create New DataFrame New_columns = np.append(Df_length.columns.values, ["start", "end"]) New_df = pd.DataFrame(columns=New_columns) # itterate over the audios for i in tqdm(range(len(Df_length)), total=len(Df_length)): row = Df_length.iloc[i] audio_length = original_length = row.duration # Audio Duration start = 0 end = min(clip_length, audio_length) while audio_length > 1: # Append Start and the End to the DataFrame new_row = pd.concat([row, pd.Series([start, end], index=["start", "end"])]) New_df = New_df.append(new_row, ignore_index=True) # Update the start and the end start = clip_length + (start - overlap_length) end = min(clip_length + start, original_length) # Decrease the length of the audio audio_length = audio_length - (clip_length - overlap_length) return New_df # train_df = Create_from_end(Df_length,clip_length=5, overlap_length =2) # train_df.to_csv("train_df_5s.csv", index=False) train_df DataSet = pd.read_csv("/kaggle/working/train_df_5s.csv") X_train, X_valid, _, _ = train_test_split( DataSet, DataSet["primary_label"], shuffle=True, test_size=0.1, random_state=42, stratify=DataSet["primary_label"], ) le = preprocessing.LabelEncoder() le.fit(X_train["primary_label"]) train_label = le.transform(X_train["primary_label"]) valid_label = le.transform(X_valid["primary_label"]) X_train["label"] = train_label X_valid["label"] = valid_label X_train.reset_index(inplace=True) X_valid.reset_index(inplace=True) X_train.to_csv("Splited_train.csv", index=False) 
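# Create_from_end above expands each recording into (start, end) windows of clip_length seconds with
# overlap_length seconds of overlap, continuing while more than ~1 s of audio remains. The window
# arithmetic on its own, as a small sketch (durations in seconds):
def clip_windows(duration, clip_length=5, overlap_length=0, min_tail=1):
    windows, start, step = [], 0.0, clip_length - overlap_length
    while duration - start > min_tail:
        windows.append((start, min(start + clip_length, duration)))
        start += step
    return windows

# Hypothetical usage: clip_windows(12.0, clip_length=5, overlap_length=2)
# -> [(0.0, 5.0), (3.0, 8.0), (6.0, 11.0), (9.0, 12.0)]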
X_valid.to_csv("Splited_valid.csv", index=False) # # Data loader class Bird_Dataset(Dataset): def __init__( self, csv_file, root_dir, mode="train", duration_sec=5, transform=None, transform_Aug=None, ): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. """ self.Bird_audios = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform self.transform_Aug = transform_Aug self.duration_sec = duration_sec self.mode = mode def __len__(self): return len(self.Bird_audios) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() audio_path = os.path.join( self.root_dir, str(self.Bird_audios.loc[idx, "filename"]) ) # Load the audio signal waveform, sample_rate = torchaudio.load(audio_path) # Resample the Audio waveform = torchaudio.functional.resample( waveform, orig_freq=sample_rate, new_freq=22050 ) # Clip the audio start_sample = int(self.Bird_audios.loc[idx, "start"] * config["sample_rate"]) end_sample = int(self.Bird_audios.loc[idx, "end"] * config["sample_rate"]) waveform = waveform[:, start_sample:end_sample] # # Padd if the shorter less than duration_sec # target_frames = int(self.duration_sec * config["sample_rate"]) # pad_transform = torchaudio.transforms.PadTrim(target_frames) # waveform = pad_transform(waveform) # Compute the spectrogram spec_transform = torchaudio.transforms.MelSpectrogram( n_fft=800, hop_length=320, n_mels=128 ) specgram = spec_transform(waveform) specgram = torchaudio.transforms.AmplitudeToDB()(specgram) resize_transform = Resize((128, 224)) specgram = resize_transform(specgram) # # Define the learnable parameter alpha # alpha = torch.nn.Parameter(torch.tensor([1.0])) # # Apply exponential transformation with alpha # exp_specgram = torch.exp(alpha * specgram) # # If alpha is a tensor, apply different values for each mel band # if alpha.dim() == 1: # exp_specgram = exp_specgram * alpha.view(1, -1, 1) specgram = torch.cat([specgram, specgram, specgram], dim=0) label = self.Bird_audios.loc[idx, "label"] return (specgram, label) if self.mode == "train" else specgram train_Dataset = Bird_Dataset( csv_file="/kaggle/working/Splited_train.csv", root_dir="/kaggle/input/birdclef-2023/train_audio", ) valid_Dataset = Bird_Dataset( csv_file="/kaggle/working/Splited_train.csv", root_dir="/kaggle/input/birdclef-2023/train_audio", ) train_loader = torch.utils.data.DataLoader( train_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True ) valid_loader = torch.utils.data.DataLoader( valid_Dataset, batch_size=128, shuffle=True, num_workers=8, pin_memory=True ) # show item from dataset x = None for image, label in train_Dataset: # specgram_db = torchaudio.transforms.AmplitudeToDB()(image) print(image.shape) # plot the spectrogram plt.imshow(image[0, :, :].numpy()) plt.xlabel("Time") plt.ylabel("Frequency") plt.show() break model_Type = "resnet50" feature = "baseline_model" rd = np.random.randint(100000) modelName = f"{model_Type}_{feature}_{rd}" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def get_pretrained_model(model_name): if model_name == "ViT": model = torch.hub.load( "facebookresearch/deit:main", "deit_tiny_patch16_224", pretrained=True ) for param in model.parameters(): # freeze model param.requires_grad = False n_inputs = model.head.in_features model.head = nn.Sequential( nn.Linear(n_inputs, 512), nn.ReLU(), nn.Dropout(0.3), nn.Linear(512, 7) ) if model_name == 
"MaxViT": model = timm.create_model("maxvit_tiny_rw_224", pretrained=False, img_size=224) model.head.fc = nn.Linear(512, 7, bias=True) if model_name == "vgg16": model = models.vgg16(pretrained=True) n_inputs = model.classifier[6].in_features model.classifier[6] = nn.Sequential( nn.Linear(n_inputs, 2048), nn.ReLU(), nn.Dropout(0.2), nn.Linear(2048, 4), nn.LogSoftmax(dim=1), ) if model_name == "resnet50": model = models.resnet50(pretrained=True) n_inputs = model.fc.in_features model.fc = nn.Sequential( nn.Linear(n_inputs, config["classes_number"]), nn.LogSoftmax(dim=1), ) if model_name == "alexnet": model = models.AlexNet() model.classifier[-1] = nn.Sequential(nn.Linear(4096, 7), nn.LogSoftmax(dim=1)) # Move to gpu and parallelize model = model.to(device) return model model = get_pretrained_model(model_Type) def train( model, criterion, optimizer, train_loader, valid_loader, save_file_name, max_epochs_stop=3, n_epochs=20, print_every=1, ): """Train a PyTorch Model Params -------- model (PyTorch model): cnn to train criterion (PyTorch loss): objective to minimize optimizer (PyTorch optimizier): optimizer to compute gradients of model parameters train_loader (PyTorch dataloader): training dataloader to iterate through valid_loader (PyTorch dataloader): validation dataloader used for early stopping save_file_name (str ending in '.pt'): file path to save the model state dict max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping n_epochs (int): maximum number of training epochs print_every (int): frequency of epochs to print training stats Returns -------- model (PyTorch model): trained cnn with best weights history (DataFrame): history of train and validation loss and accuracy """ # Early stopping intialization epochs_no_improve = 0 valid_loss_min = np.Inf valid_acc_max = 0 valid_f1_max = 0 valid_max_acc = 0 history = [] # Number of epochs already trained (if using loaded in model weights) try: print(f"Model has been trained for: {model.epochs} epochs.\n") except: model.epochs = 0 print(f"Starting Training from Scratch.\n") overall_start = timer() # Main loop for epoch in range(n_epochs): # keep track of training and validation loss each epoch train_loss = 0.0 valid_loss = 0.0 train_acc = 0 valid_acc = 0 Train_f1_sc = 0 Valid_f1_sc = 0 # Set to training model.train() start = timer() # Training loop for ii, (data, target) in enumerate(train_loader): # Tensors to gpu if train_on_gpu: data, target = data.cuda(), target.cuda() # Clear gradients optimizer.zero_grad() # Predicted outputs are log probabilities output = model(data) # .sigmoid() # target = target.unsqueeze(1) # Loss and backpropagation of gradients loss = criterion(output, target) loss.backward() # Update the parameters optimizer.step() # Track train loss by multiplying average loss by number of examples in batch train_loss += loss.item() * data.size(0) # Calculate accuracy by finding max log probability _, pred = torch.max(output, dim=1) # pred = torch.ge(output, 0.35) correct_tensor = pred.eq(target.data.view_as(pred)) # Need to convert correct tensor from int to float to average accuracy = torch.mean(correct_tensor.type(torch.FloatTensor)) # Multiply average accuracy times the number of examples in batch Train_f1_sc = f1_score(target.cpu().data, pred.cpu(), average="macro") train_acc += accuracy.item() * data.size(0) # Track training progress print( f"Epoch: {epoch}\t{100 * (ii + 1) / len(train_loader):.2f}% complete. 
{timer() - start:.2f} seconds elapsed in epoch.", end="\r", ) # After training loops ends, start validation else: model.epochs += 1 # Don't need to keep track of gradients with torch.no_grad(): # Set to evaluation mode model.eval() # Validation loop for data, target in valid_loader: # Tensors to gpu if train_on_gpu: data, target = data.cuda(), target.cuda() # Forward pass # output = model(data) # Validation loss output = model(data) # .sigmoid() # target = target.unsqueeze(1) # Loss and backpropagation of gradients loss = criterion(output, target) # Multiply average loss times the number of examples in batch valid_loss += loss.item() * data.size(0) # Calculate validation accuracy _, pred = torch.max(output, dim=1) # pred = torch.ge(output, 0.35) correct_tensor = pred.eq(target.data.view_as(pred)) accuracy = torch.mean(correct_tensor.type(torch.FloatTensor)) # Multiply average accuracy times the number of examples valid_acc += accuracy.item() * data.size(0) Valid_f1_sc = f1_score( target.cpu().data, pred.cpu(), average="macro" ) # Calculate average losses train_loss = train_loss / len(train_loader.dataset) valid_loss = valid_loss / len(valid_loader.dataset) # Calculate average accuracy train_acc = train_acc / len(train_loader.dataset) valid_acc = valid_acc / len(valid_loader.dataset) history.append( [ train_loss, valid_loss, train_acc, valid_acc, Train_f1_sc, Valid_f1_sc, ] ) # Print training and validation results if (epoch + 1) % print_every == 0: print( f"\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}" ) print( f"\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Train F1 score: {100 * Train_f1_sc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%\t Validation F1 score: {100 * Valid_f1_sc:.2f}%" ) # Save the model if validation loss decreases if valid_loss < valid_loss_min: # if Valid_f1_sc > valid_f1_max: # Save model torch.save(model.state_dict(), save_file_name) # Track improvement epochs_no_improve = 0 valid_loss_min = valid_loss valid_acc_max = valid_acc valid_f1_max = Valid_f1_sc # valid_best_acc = valid_acc best_epoch = epoch # Otherwise increment count of epochs with no improvement elif valid_loss >= valid_loss_min: epochs_no_improve += 1 # Trigger early stopping if epochs_no_improve >= max_epochs_stop: print( f"\nEarly Stopping! Total epochs: {epoch}. Best epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc_max:.2f}%" ) total_time = timer() - overall_start print( f"{total_time:.2f} total seconds elapsed. {total_time / (epoch+1):.2f} seconds per epoch." ) # Load the best state dict model.load_state_dict(torch.load(save_file_name)) # Attach the optimizer model.optimizer = optimizer # Format history history = pd.DataFrame( history, columns=[ "train_loss", "valid_loss", "train_acc", "valid_acc", "train_f1", "valid_f1", ], ) return model, history # Attach the optimizer model.optimizer = optimizer # Record overall time and print out stats total_time = timer() - overall_start print( f"\nBest epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc:.2f}%" ) print( f"{total_time:.2f} total seconds elapsed. {total_time / (epoch):.2f} seconds per epoch." 
) # Format history history = pd.DataFrame( history, columns=[ "train_loss", "valid_loss", "train_acc", "valid_acc", "train_f1", "valid_f1", ], ) return model, history # criterion = nn.CrossEntropyLoss(weight = torch.FloatTensor(class_w).to(device)) criterion = nn.CrossEntropyLoss() # criterion =torch.nn.BCEWithLogitsLoss(pos_weight = torch.tensor(weight_for_1).to(device)) # criterion =torch.nn.BCEWithLogitsLoss() # criterion = LabelSmoothingCrossEntropy(weight = class_w) criterion = criterion.to("cuda") optimizer = torch.optim.Adam(model.parameters(), lr=1e-5) # optimizer = optim.Adam(model.parameters(), lr=0.003) # exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.97) model, history = train( model, criterion, optimizer, train_loader, valid_loader, save_file_name=f"{modelName}.pt", max_epochs_stop=4, n_epochs=10, print_every=1, ) device
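# # Sanity-check inference — a minimal sketch, assuming `model`, `modelName`,
# `valid_loader`, `le` and `device` from the cells above are still in scope.
# It reloads the checkpoint saved during training and maps the predicted class
# indices back to species codes with the fitted label encoder.
model.load_state_dict(torch.load(f"{modelName}.pt", map_location=device))
model.eval()
with torch.no_grad():
    specs, labels = next(iter(valid_loader))  # one batch of mel-spectrogram clips
    log_probs = model(specs.to(device))  # LogSoftmax outputs, shape (B, classes_number)
    pred_idx = log_probs.argmax(dim=1).cpu().numpy()
print("predicted:", le.inverse_transform(pred_idx)[:5])
print("actual:   ", le.inverse_transform(labels.numpy())[:5])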
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080111.ipynb
null
null
[{"Id": 129080111, "ScriptId": 38204879, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13581918, "CreationDate": "05/10/2023 20:47:34", "VersionNumber": 1.0, "Title": "Bird_Classifiaction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 546.0, "LinesInsertedFromPrevious": 546.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,946
0
4,946
4,946
129080661
<jupyter_start><jupyter_text>Iris Species The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1]. It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other. The columns in this dataset are: - Id - SepalLengthCm - SepalWidthCm - PetalLengthCm - PetalWidthCm - Species [![Sepal Width vs. Sepal Length](https://www.kaggle.io/svf/138327/e401fb2cc596451b1e4d025aaacda95f/sepalWidthvsLength.png)](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length) [1]: http://archive.ics.uci.edu/ml/ Kaggle dataset identifier: iris <jupyter_script># # EDA: Iris Data # Import packages import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # import statsmodels.api as sm import plotly.express as px # Settings pd.options.display.max_columns = 100 pd.options.display.max_rows = 100 pd.options.display.float_format = "{:.2f}".format plt.style.use("ggplot") df = pd.read_csv("/kaggle/input/iris/Iris.csv") newcols = [ "id", "sepal_length", "sepal_width", "petal_length", "petal_width", "species", ] df.columns = newcols df.head() df.shape df.columns df.loc[:, "species"].value_counts() # # Introduction to IRIS dataset and 2D scatter plot # sns.relplot(data=df, x="sepal_length", y="sepal_width", hue="species") sns.relplot(data=df, x="petal_length", y="petal_width", hue="species") # 3D scatter plot fig = px.scatter_3d( df, x="sepal_length", y="sepal_width", z="petal_length", color="species" ) fig.show() # PairPlots sns.pairplot(df, hue="species") # Limitations of Pair Plots # Histogram and Introduction to PDF(Probability Density Function) # * Histogram: How many points exist for each value on the x-axis. # * PDF: Smoothed form of histogram sns.displot(df, x="petal_length", hue="species", kind="kde", fill=True) # Univariate Analysis using PDF fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 8)) sns.kdeplot(df, x="petal_length", hue="species", fill=True, ax=ax[0, 0]) sns.kdeplot(df, x="petal_width", hue="species", fill=True, ax=ax[0, 1]) sns.kdeplot(df, x="sepal_length", hue="species", fill=True, ax=ax[1, 0]) sns.kdeplot(df, x="sepal_width", hue="species", fill=True, ax=ax[1, 1]) # Cumulative Distibution Function # * What fraction of sns.ecdfplot(data=df, x="petal_length", stat="proportion")
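# # ECDF by hand — a minimal sketch, assuming the `df`, `np` and `plt` objects above.
# It just makes explicit what sns.ecdfplot draws: for each x, the fraction of samples
# with petal_length less than or equal to x.
vals = np.sort(df["petal_length"].values)
ecdf = np.arange(1, len(vals) + 1) / len(vals)
plt.step(vals, ecdf, where="post")
plt.xlabel("petal_length")
plt.ylabel("proportion <= x")
plt.show()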
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/080/129080661.ipynb
iris
null
[{"Id": 129080661, "ScriptId": 38285178, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 473615, "CreationDate": "05/10/2023 20:55:57", "VersionNumber": 2.0, "Title": "EDA: Iris Data", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 105.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 40.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184817708, "KernelVersionId": 129080661, "SourceDatasetVersionId": 420}]
[{"Id": 420, "DatasetId": 19, "DatasourceVersionId": 420, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2016 07:38:05", "VersionNumber": 2.0, "Title": "Iris Species", "Slug": "iris", "Subtitle": "Classify iris plants into three species in this classic dataset", "Description": "The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].\n\nIt includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.\n\nThe columns in this dataset are:\n\n - Id\n - SepalLengthCm\n - SepalWidthCm\n - PetalLengthCm\n - PetalWidthCm\n - Species\n\n[![Sepal Width vs. Sepal Length](https://www.kaggle.io/svf/138327/e401fb2cc596451b1e4d025aaacda95f/sepalWidthvsLength.png)](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)\n\n\n [1]: http://archive.ics.uci.edu/ml/", "VersionNotes": "Republishing files so they're formally in our system", "TotalCompressedBytes": 15347.0, "TotalUncompressedBytes": 15347.0}]
[{"Id": 19, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 420.0, "CurrentDatasourceVersionId": 420.0, "ForumId": 997, "Type": 2, "CreationDate": "01/12/2016 00:33:31", "LastActivityDate": "02/06/2018", "TotalViews": 1637863, "TotalDownloads": 423540, "TotalVotes": 3416, "TotalKernels": 6420}]
null
false
0
640
0
938
640
129163813
<jupyter_start><jupyter_text>Advertising dataset This data expresses sales according to the type of advertisement and the size of the cost . The dataset contains 200 rows of 3 features [ TV , Radio , Newspaper] and target variable [Sales]. Kaggle dataset identifier: advertising-dataset <jupyter_script>import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error import math from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.tree import DecisionTreeRegressor reklamveri = pd.read_csv("/kaggle/input/advertising-dataset/Advertising.csv") reklamveri.head() reklamveri.tail() reklamveri.shape reklamveri.columns reklamveri.rename( columns={ "Unnamed: 0": "unnamed:0", "TV": "Televizyon", "Radio": "Radio", "Newspaper": "Gazete", "Sales": "Satış", }, inplace=True, ) reklamveri = reklamveri.drop(["unnamed:0"], axis=1) reklamveri.info() reklamveri.isnull().sum() corr = reklamveri.corr() corr sns.heatmap(corr, annot=True) sns.scatterplot(x="Televizyon", y="Satış", data=reklamveri) sns.scatterplot(x="Radio", y="Satış", data=reklamveri) reklamveri.describe().T plt.figure(figsize=(6, 6)) sns.distplot(reklamveri["Televizyon"], hist=True) plt.figure(figsize=(6, 6)) sns.distplot(reklamveri["Radio"], hist=True) plt.figure(figsize=(6, 6)) sns.distplot(reklamveri["Gazete"], hist=True) def satış_grup_(x): if x <= 2: x = "0-2sg" elif x > 2 and x <= 7: x = "3-7sg" elif x > 7 and x <= 12: x = "8-12sg" elif x > 12 and x <= 17: x = "13-17sg" elif x > 17 and x <= 22: x = "18-22sg" else: x = "22sg+" return x reklamveri["satış_grup"] = reklamveri["Satış"].apply(satış_grup_) reklamveri.groupby("satış_grup")["Satış"].agg(["count"]) plt.figure(figsize=(12, 5)) sns.countplot(x="satış_grup", data=reklamveri) sns.pairplot(reklamveri) reklamveri = reklamveri.drop(["satış_grup"], axis=1) x = reklamveri.drop(["Satış"], axis=1) y = reklamveri["Satış"] x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.10, random_state=41 ) # DecisionTreeRegressor r_dt = DecisionTreeRegressor(random_state=0) model = r_dt.fit(x_train, y_train) y_pred = r_dt.predict(x_test) print("R2 score:", r2_score(y_test, y_pred)) MAE = mean_absolute_error(y_test, y_pred) print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE) MSE = mean_squared_error(y_test, y_pred) print("Ortalama Kare Hata (Mean Squared Error):", MSE) RMSE = math.sqrt(MSE) print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE) # RandomForestRegressor rf_reg = RandomForestRegressor(n_estimators=10, random_state=0) model = rf_reg.fit(x_train, y_train) y_pred = rf_reg.predict(x_test) print("R2 score:", r2_score(y_test, y_pred)) MAE = mean_absolute_error(y_test, y_pred) print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE) MSE = mean_squared_error(y_test, y_pred) print("Ortalama Kare Hata (Mean Squared Error):", MSE) RMSE = math.sqrt(MSE) print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE) # KNeighborsRegressor neigh = KNeighborsRegressor(n_neighbors=2) neigh.fit(x_train, y_train) y_pred = neigh.predict(x_test) print("R2 score:", r2_score(y_test, y_pred)) score_list = [] for each in range(1, 15): knn2 = KNeighborsRegressor(n_neighbors=each) knn2.fit(x_train, y_train) score_list.append(knn2.score(x_test, y_test)) plt.plot(range(1, 15), score_list) plt.xlabel("k values") 
plt.ylabel("accuracy") plt.show neigh = KNeighborsRegressor(n_neighbors=1) neigh.fit(x_train, y_train) y_pred = neigh.predict(x_test) print("R2 score:", r2_score(y_test, y_pred)) knn = KNeighborsRegressor() knn_params = {"n_neighbors": np.arange(1, 11, 1)} knn_cv_model = GridSearchCV(knn, knn_params, cv=10) knn_cv_model.fit(x_train, y_train) knn_cv_model.best_params_["n_neighbors"] knn_cv_model.best_estimator_ knn_cv_model.best_score_ MAE = mean_absolute_error(y_test, y_pred) print("Ortalama Mutlak Hata (Mean Absolute Error):", MAE) MSE = mean_squared_error(y_test, y_pred) print("Ortalama Kare Hata (Mean Squared Error):", MSE) RMSE = math.sqrt(MSE) print("Kök Ortalama Kare Hata (Root Mean Square Error):", RMSE)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/163/129163813.ipynb
advertising-dataset
tawfikelmetwally
[{"Id": 129163813, "ScriptId": 38398333, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9623697, "CreationDate": "05/11/2023 13:13:35", "VersionNumber": 1.0, "Title": "Advertising dataset (%95 Accuracy", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 174.0, "LinesInsertedFromPrevious": 174.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 184968141, "KernelVersionId": 129163813, "SourceDatasetVersionId": 5604378}]
[{"Id": 5604378, "DatasetId": 3223827, "DatasourceVersionId": 5679433, "CreatorUserId": 12641535, "LicenseName": "CC BY-SA 4.0", "CreationDate": "05/04/2023 21:48:38", "VersionNumber": 1.0, "Title": "Advertising dataset", "Slug": "advertising-dataset", "Subtitle": "sales prediction using linear regression", "Description": "This data expresses sales according to the type of advertisement and the size of the cost .\nThe dataset contains 200 rows of 3 features [ TV , Radio , Newspaper] and target variable [Sales].", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3223827, "CreatorUserId": 12641535, "OwnerUserId": 12641535.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5604378.0, "CurrentDatasourceVersionId": 5679433.0, "ForumId": 3288817, "Type": 2, "CreationDate": "05/04/2023 21:48:38", "LastActivityDate": "05/04/2023", "TotalViews": 5648, "TotalDownloads": 883, "TotalVotes": 28, "TotalKernels": 14}]
[{"Id": 12641535, "UserName": "tawfikelmetwally", "DisplayName": "tawfik elmetwally", "RegisterDate": "11/27/2022", "PerformanceTier": 2}]
false
1
1,608
1
1,676
1,608
129019596
<jupyter_start><jupyter_text>Forest Fire Classification Kaggle dataset identifier: forest-fire-classification <jupyter_script>data_dir = "/kaggle/input/forest-fire-classification/Forest_Fire" from tensorflow.keras.preprocessing.image import ImageDataGenerator train = ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) test = ImageDataGenerator(rescale=1.0 / 255) traindata = train.flow_from_directory( "/kaggle/input/forest-fire-classification/Forest_Fire/Training and Validation", target_size=(128, 128), batch_size=32, class_mode="categorical", ) testdata = test.flow_from_directory( "/kaggle/input/forest-fire-classification/Forest_Fire/Testing", target_size=(128, 128), batch_size=32, class_mode="categorical", ) from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten, Softmax, Activation from tensorflow.keras.preprocessing import image import matplotlib.pyplot as plt import pandas as pd from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D from tensorflow.keras.layers import Dense, Flatten from tensorflow.keras.models import Model import tensorflow as tf # ***VGG NET 19*** import tensorflow as tf from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout def VGG19(input_shape, num_classes): model = tf.keras.Sequential() # Block 1 model.add( Conv2D(64, (3, 3), activation="relu", padding="same", input_shape=input_shape) ) model.add(Conv2D(64, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 2 model.add(Conv2D(128, (3, 3), activation="relu", padding="same")) model.add(Conv2D(128, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 3 model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 4 model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 5 model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Dense Layers model.add(Flatten()) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation="softmax")) return model input_shape = (128, 128, 3) num_classes = 2 model = VGG19(input_shape, num_classes) model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) model1 = model.fit(traindata, epochs=5, validation_data=testdata, batch_size=32) import pandas as pd x x = model1.history # Load the history into a pandas Dataframe df = pd.DataFrame(x) df.head() plt.plot(model1.history["accuracy"]) plt.plot(model1.history["val_accuracy"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["Train", "Validation"], loc="upper left") plt.show() # summarize 
history for loss plt.plot(model1.history["loss"]) plt.plot(model1.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["Train", "Validation"], loc="upper left") plt.show() import cv2 t = cv2.imread( "/content/gdrive/MyDrive/fire_clasify/forest_fire/Testing/nofire/abc358.jpg" ) plt.imshow(t) import numpy as np from tensorflow.keras.preprocessing.image import img_to_array testimg = cv2.resize(t, (128, 128)) testimg = img_to_array(testimg) / 255 h = np.expand_dims(testimg, axis=0) r = model.predict(h) classnames = ["fire", "nofire"] ypred = classnames[np.argmax(r)] ypred
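# Aggregate test evaluation — a minimal sketch, assuming `model`, `test` and the
# Kaggle dataset paths used above. Instead of judging a single image, it scores the
# whole test set; shuffle=False keeps predictions aligned with eval_data.classes.
from sklearn.metrics import classification_report

test_loss, test_acc = model.evaluate(testdata, verbose=0)
print(f"Test loss: {test_loss:.4f}  Test accuracy: {test_acc:.4f}")
eval_data = test.flow_from_directory(
    "/kaggle/input/forest-fire-classification/Forest_Fire/Testing",
    target_size=(128, 128),
    batch_size=32,
    class_mode="categorical",
    shuffle=False,
)
probs = model.predict(eval_data, verbose=0)
print(
    classification_report(
        eval_data.classes,
        probs.argmax(axis=1),
        target_names=list(eval_data.class_indices.keys()),
    )
)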
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019596.ipynb
forest-fire-classification
seebicb
[{"Id": 129019596, "ScriptId": 38294738, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14551355, "CreationDate": "05/10/2023 11:03:23", "VersionNumber": 1.0, "Title": "Forest Fire Classification || VGG19", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 184706482, "KernelVersionId": 129019596, "SourceDatasetVersionId": 5621471}]
[{"Id": 5621471, "DatasetId": 3232225, "DatasourceVersionId": 5696667, "CreatorUserId": 14551355, "LicenseName": "Unknown", "CreationDate": "05/06/2023 22:09:59", "VersionNumber": 1.0, "Title": "Forest Fire Classification", "Slug": "forest-fire-classification", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3232225, "CreatorUserId": 14551355, "OwnerUserId": 14551355.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5621471.0, "CurrentDatasourceVersionId": 5696667.0, "ForumId": 3297373, "Type": 2, "CreationDate": "05/06/2023 22:09:59", "LastActivityDate": "05/06/2023", "TotalViews": 113, "TotalDownloads": 9, "TotalVotes": 1, "TotalKernels": 4}]
[{"Id": 14551355, "UserName": "seebicb", "DisplayName": "Haseeb Ahmed", "RegisterDate": "04/08/2023", "PerformanceTier": 0}]
data_dir = "/kaggle/input/forest-fire-classification/Forest_Fire" from tensorflow.keras.preprocessing.image import ImageDataGenerator train = ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) test = ImageDataGenerator(rescale=1.0 / 255) traindata = train.flow_from_directory( "/kaggle/input/forest-fire-classification/Forest_Fire/Training and Validation", target_size=(128, 128), batch_size=32, class_mode="categorical", ) testdata = test.flow_from_directory( "/kaggle/input/forest-fire-classification/Forest_Fire/Testing", target_size=(128, 128), batch_size=32, class_mode="categorical", ) from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Flatten, Softmax, Activation from tensorflow.keras.preprocessing import image import matplotlib.pyplot as plt import pandas as pd from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D from tensorflow.keras.layers import Dense, Flatten from tensorflow.keras.models import Model import tensorflow as tf # ***VGG NET 19*** import tensorflow as tf from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout def VGG19(input_shape, num_classes): model = tf.keras.Sequential() # Block 1 model.add( Conv2D(64, (3, 3), activation="relu", padding="same", input_shape=input_shape) ) model.add(Conv2D(64, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 2 model.add(Conv2D(128, (3, 3), activation="relu", padding="same")) model.add(Conv2D(128, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 3 model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(Conv2D(256, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 4 model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Block 5 model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(Conv2D(512, (3, 3), activation="relu", padding="same")) model.add(MaxPooling2D((2, 2), strides=(2, 2))) # Dense Layers model.add(Flatten()) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation="softmax")) return model input_shape = (128, 128, 3) num_classes = 2 model = VGG19(input_shape, num_classes) model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) model1 = model.fit(traindata, epochs=5, validation_data=testdata, batch_size=32) import pandas as pd x x = model1.history # Load the history into a pandas Dataframe df = pd.DataFrame(x) df.head() plt.plot(model1.history["accuracy"]) plt.plot(model1.history["val_accuracy"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["Train", "Validation"], loc="upper left") plt.show() # summarize history for loss plt.plot(model1.history["loss"]) plt.plot(model1.history["val_loss"]) plt.title("model loss") 
plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["Train", "Validation"], loc="upper left") plt.show() import cv2 t = cv2.imread( "/content/gdrive/MyDrive/fire_clasify/forest_fire/Testing/nofire/abc358.jpg" ) plt.imshow(t) import numpy as np from tensorflow.keras.preprocessing.image import img_to_array testimg = cv2.resize(t, (128, 128)) testimg = img_to_array(testimg) / 255 h = np.expand_dims(testimg, axis=0) r = model.predict(h) classnames = ["fire", "nofire"] ypred = classnames[np.argmax(r)] ypred
false
0
1,474
3
1,496
1,474
129019255
""" Python 3.10 Titanic Exploratory Data Analysis and visualization program will you survive on the titanic or not File name Titanic_eda.py Version: 0.1 Author: MLCV Date: 2023-05-09 """ # Importing Libraries # visualization from plotnine import * # plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot. import matplotlib.pyplot as plt # collection of command style functions that make matplotlib work like MATLAB import seaborn as sns # statistical data visualization # data analysis import pandas as pd # is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, built on top of the Python programming language import numpy as np # offers comprehensive mathematical functions, random number generators, linear algebra routines, Fourier transforms, and more import warnings # Base category for warnings triggered during the process of importing a module (ignored by default) warnings.filterwarnings("ignore") # here we prescribe the action in case of an error. """let's create a function, feed the training and test data sets as an input, and at the output we will get a combined one, without an index""" def concat_df(train_data, test_data): # Returns a concatenated df of training and test set return pd.concat([train_data, test_data], sort=True).reset_index(drop=True) """Let's create a function at the input of which we feed the combined data set, and at the output it returns the separated df of the training and test set, saved without a label""" def divide_df(all_data): # Returns divided dfs of training and test set return all_data.loc[:890], all_data.loc[891:].drop(["Survived"], axis=1) """The Python Pandas packages helps us work with our datasets. We start by acquiring the training and testing datasets into Pandas DataFrames. We also combine these datasets to run certain operations on both datasets together. """ df_train = pd.read_csv("/kaggle/input/titanic/train.csv") # load train data df_test = pd.read_csv("/kaggle/input/titanic/test.csv") # load test data df_all = concat_df( df_train, df_test ) # we apply the function described above, the union of two dataframes. 
"""supplement the data sets with the name parameter""" df_train.name = "Training Set" # set parameter for dataset - dataframe name df_test.name = "Test Set" # set parameter for dataset - dataframe name df_all.name = "All Set" # set parameter for dataset - dataframe name dfs = [df_train, df_test] # display information about datasets print("Number of Training Examples = {}".format(df_train.shape[0])) print("Number of Test Examples = {}\n".format(df_test.shape[0])) print("Training X Shape = {}".format(df_train.shape)) print("Training y Shape = {}\n".format(df_train["Survived"].shape[0])) print("Test X Shape = {}".format(df_test.shape)) print("Test y Shape = {}\n".format(df_test.shape[0])) print(df_train.columns) print(df_test.columns) print(df_train.info()) print(df_train.describe()) df_train.sample(5) """Correlation Between The Features""" sns.heatmap(df_train.corr(), annot=True, cmap="RdYlGn", linewidths=0.2) fig = plt.gcf() fig.set_size_inches(10, 8) plt.show() # function to analyze each column of the dataframe def display_missing(df): for col in df.columns.tolist(): print("{} column missing values: {}".format(col, df[col].isnull().sum())) print("\n") for df in dfs: print("{}".format(df.name)) display_missing(df) df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train["Sex"].value_counts() df_train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train["Pclass"].value_counts() df_train["Age"].value_counts() df_train[["Age", "Survived"]].groupby(["Age"], as_index=False).mean().sort_values( by="Survived", ascending=False ) train_dropna_age = df_train["Age"].dropna() df_train["Survived"] = df_train["Survived"].astype("category") # Although it is a number, it is changed to a category because it is categorical data df_train["Survived"] = df_train["Survived"].astype("category") df_train.head(3) df_train["Initial"] = 0 for i in df_train: df_train["Initial"] = df_train.Name.str.extract( "([A-Za-z]+)\." ) # lets extract the Salutations """We are using the Regex: [A-Za-z]+).. So what it does is, it looks for strings which lie between A-Z or a-z and followed by a .(dot). 
So we successfully extract the Initials from the Name.""" pd.crosstab(df_train.Initial, df_train.Sex).T.style.background_gradient( cmap="summer_r" ) # Checking the Initials with the Sex df_train["Initial"].replace( [ "Mlle", "Mme", "Ms", "Dr", "Major", "Lady", "Countess", "Jonkheer", "Col", "Rev", "Capt", "Sir", "Don", ], [ "Miss", "Miss", "Miss", "Mr", "Mr", "Mrs", "Mrs", "Other", "Other", "Other", "Mr", "Mr", "Mr", ], inplace=True, ) df_train.groupby("Initial")["Age"].mean() # lets check the average age by Initials # Assigning the NaN Values with the Ceil values of the mean ages df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mr"), "Age"] = 33 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mrs"), "Age"] = 36 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Master"), "Age"] = 5 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Miss"), "Age"] = 22 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Other"), "Age"] = 46 df_train.Age.isnull().any() # So no null values left finally # print(df_all.info()) # Assign all the null values to N df_all.Cabin.fillna("N", inplace=True) # group these cabins according to the letter of the cabin name df_all.Cabin = [str(i)[0] for i in df_all.Cabin] def percent_value_counts(df, feature): """This function takes in a dataframe and a column and finds the percentage of the value_counts""" percent = pd.DataFrame( round(df.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) ) ## creating a df with th total = pd.DataFrame(df.loc[:, feature].value_counts(dropna=False)) ## concating percent and total dataframe total.columns = ["Total"] percent.columns = ["Percent"] return pd.concat([total, percent], axis=1) percent_value_counts(df_all, "Cabin") df_all.groupby("Cabin")["Fare"].mean().sort_values() def cabin_estimator(i): """Grouping cabin feature by the first letter""" a = 0 if i < 16: a = "G" elif i >= 16 and i < 27: a = "F" elif i >= 27 and i < 38: a = "T" elif i >= 38 and i < 47: a = "A" elif i >= 47 and i < 53: a = "E" elif i >= 53 and i < 54: a = "D" elif i >= 54 and i < 116: a = "C" else: a = "B" return a """Now, these means can help us determine the unknown cabins, if we compare each unknown cabin rows with the given mean's above. Let's write a simple function so that we can give cabin names based on the means.""" # applying cabin estimator function. df_all["Cabin"] = df_all.Fare.apply(lambda x: cabin_estimator(x)) percent_value_counts(df_all, "Cabin") df_all["Fare"] = pd.qcut(df_all["Fare"], 13) fig, axs = plt.subplots(figsize=(22, 9)) sns.countplot(x="Fare", hue="Survived", data=df_all) plt.xlabel("Fare", size=15, labelpad=20) plt.ylabel("Passenger Count", size=15, labelpad=20) plt.tick_params(axis="x", labelsize=10) plt.tick_params(axis="y", labelsize=15) plt.legend(["Not Survived", "Survived"], loc="upper right", prop={"size": 15}) plt.title("Count of Survival in {} Feature".format("Fare"), size=15, y=1.05) plt.show() df_train["Embarked"].value_counts() df_train[df_train.Embarked.isnull()] """ We may be able to solve these two missing values by looking at other independent variables of the two raws. Both passengers paid a fare of $80, are of Pclass 1 and female Sex. the average fare closest to $80 are in the C Embarked values where pclass is 1. 
So, let's fill in the missing values as "C" """ df_train["Embarked"].fillna("C", inplace=True) df_train["Embarked"].value_counts() df_train[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean() pd.crosstab( [df_train.Embarked, df_train.Pclass], [df_train.Sex, df_train.Survived], margins=True, ).style.background_gradient(cmap="summer_r") df_train.head(3) output = pd.DataFrame( {"PassengerId": df_train.PassengerId, "Survived": df_train.Survived} ) output.to_csv("submission.csv", index=False)
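A note on the notebook above: the missing ages are filled with hard-coded per-title constants, and the final submission frame appears to be built from the training rows and their known labels rather than from test-set predictions, so it is not a usable competition submission as written. The per-title imputation can also be made data-driven; the sketch below (toy rows and column names mirroring the Titanic schema, not the author's exact pipeline) reuses the same title-extraction regex and fills each missing age with the mean age of its title group.

```python
import pandas as pd

# Toy rows mimicking the Titanic "Name"/"Age" columns (values are made up)
df = pd.DataFrame({
    "Name": ["Braund, Mr. Owen", "Cumings, Mrs. John", "Heikkinen, Miss. Laina",
             "Allen, Mr. William", "Palsson, Miss. Torborg"],
    "Age": [22.0, 38.0, 26.0, None, None],
})

# Same regex idea as the notebook: the word immediately before a dot is the title
df["Initial"] = df["Name"].str.extract(r"([A-Za-z]+)\.", expand=False)

# Data-driven imputation: fill each missing Age with the mean age of its title group
df["Age"] = df["Age"].fillna(df.groupby("Initial")["Age"].transform("mean"))

print(df)
```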
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019255.ipynb
null
null
[{"Id": 129019255, "ScriptId": 38350646, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15035340, "CreationDate": "05/10/2023 11:00:15", "VersionNumber": 4.0, "Title": "Titanic", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 211.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
""" Python 3.10 Titanic Exploratory Data Analysis and visualization program will you survive on the titanic or not File name Titanic_eda.py Version: 0.1 Author: MLCV Date: 2023-05-09 """ # Importing Libraries # visualization from plotnine import * # plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot. import matplotlib.pyplot as plt # collection of command style functions that make matplotlib work like MATLAB import seaborn as sns # statistical data visualization # data analysis import pandas as pd # is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, built on top of the Python programming language import numpy as np # offers comprehensive mathematical functions, random number generators, linear algebra routines, Fourier transforms, and more import warnings # Base category for warnings triggered during the process of importing a module (ignored by default) warnings.filterwarnings("ignore") # here we prescribe the action in case of an error. """let's create a function, feed the training and test data sets as an input, and at the output we will get a combined one, without an index""" def concat_df(train_data, test_data): # Returns a concatenated df of training and test set return pd.concat([train_data, test_data], sort=True).reset_index(drop=True) """Let's create a function at the input of which we feed the combined data set, and at the output it returns the separated df of the training and test set, saved without a label""" def divide_df(all_data): # Returns divided dfs of training and test set return all_data.loc[:890], all_data.loc[891:].drop(["Survived"], axis=1) """The Python Pandas packages helps us work with our datasets. We start by acquiring the training and testing datasets into Pandas DataFrames. We also combine these datasets to run certain operations on both datasets together. """ df_train = pd.read_csv("/kaggle/input/titanic/train.csv") # load train data df_test = pd.read_csv("/kaggle/input/titanic/test.csv") # load test data df_all = concat_df( df_train, df_test ) # we apply the function described above, the union of two dataframes. 
"""supplement the data sets with the name parameter""" df_train.name = "Training Set" # set parameter for dataset - dataframe name df_test.name = "Test Set" # set parameter for dataset - dataframe name df_all.name = "All Set" # set parameter for dataset - dataframe name dfs = [df_train, df_test] # display information about datasets print("Number of Training Examples = {}".format(df_train.shape[0])) print("Number of Test Examples = {}\n".format(df_test.shape[0])) print("Training X Shape = {}".format(df_train.shape)) print("Training y Shape = {}\n".format(df_train["Survived"].shape[0])) print("Test X Shape = {}".format(df_test.shape)) print("Test y Shape = {}\n".format(df_test.shape[0])) print(df_train.columns) print(df_test.columns) print(df_train.info()) print(df_train.describe()) df_train.sample(5) """Correlation Between The Features""" sns.heatmap(df_train.corr(), annot=True, cmap="RdYlGn", linewidths=0.2) fig = plt.gcf() fig.set_size_inches(10, 8) plt.show() # function to analyze each column of the dataframe def display_missing(df): for col in df.columns.tolist(): print("{} column missing values: {}".format(col, df[col].isnull().sum())) print("\n") for df in dfs: print("{}".format(df.name)) display_missing(df) df_train[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train["Sex"].value_counts() df_train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values( by="Survived", ascending=False ) df_train["Pclass"].value_counts() df_train["Age"].value_counts() df_train[["Age", "Survived"]].groupby(["Age"], as_index=False).mean().sort_values( by="Survived", ascending=False ) train_dropna_age = df_train["Age"].dropna() df_train["Survived"] = df_train["Survived"].astype("category") # Although it is a number, it is changed to a category because it is categorical data df_train["Survived"] = df_train["Survived"].astype("category") df_train.head(3) df_train["Initial"] = 0 for i in df_train: df_train["Initial"] = df_train.Name.str.extract( "([A-Za-z]+)\." ) # lets extract the Salutations """We are using the Regex: [A-Za-z]+).. So what it does is, it looks for strings which lie between A-Z or a-z and followed by a .(dot). 
So we successfully extract the Initials from the Name.""" pd.crosstab(df_train.Initial, df_train.Sex).T.style.background_gradient( cmap="summer_r" ) # Checking the Initials with the Sex df_train["Initial"].replace( [ "Mlle", "Mme", "Ms", "Dr", "Major", "Lady", "Countess", "Jonkheer", "Col", "Rev", "Capt", "Sir", "Don", ], [ "Miss", "Miss", "Miss", "Mr", "Mr", "Mrs", "Mrs", "Other", "Other", "Other", "Mr", "Mr", "Mr", ], inplace=True, ) df_train.groupby("Initial")["Age"].mean() # lets check the average age by Initials # Assigning the NaN Values with the Ceil values of the mean ages df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mr"), "Age"] = 33 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Mrs"), "Age"] = 36 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Master"), "Age"] = 5 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Miss"), "Age"] = 22 df_train.loc[(df_train.Age.isnull()) & (df_train.Initial == "Other"), "Age"] = 46 df_train.Age.isnull().any() # So no null values left finally # print(df_all.info()) # Assign all the null values to N df_all.Cabin.fillna("N", inplace=True) # group these cabins according to the letter of the cabin name df_all.Cabin = [str(i)[0] for i in df_all.Cabin] def percent_value_counts(df, feature): """This function takes in a dataframe and a column and finds the percentage of the value_counts""" percent = pd.DataFrame( round(df.loc[:, feature].value_counts(dropna=False, normalize=True) * 100, 2) ) ## creating a df with th total = pd.DataFrame(df.loc[:, feature].value_counts(dropna=False)) ## concating percent and total dataframe total.columns = ["Total"] percent.columns = ["Percent"] return pd.concat([total, percent], axis=1) percent_value_counts(df_all, "Cabin") df_all.groupby("Cabin")["Fare"].mean().sort_values() def cabin_estimator(i): """Grouping cabin feature by the first letter""" a = 0 if i < 16: a = "G" elif i >= 16 and i < 27: a = "F" elif i >= 27 and i < 38: a = "T" elif i >= 38 and i < 47: a = "A" elif i >= 47 and i < 53: a = "E" elif i >= 53 and i < 54: a = "D" elif i >= 54 and i < 116: a = "C" else: a = "B" return a """Now, these means can help us determine the unknown cabins, if we compare each unknown cabin rows with the given mean's above. Let's write a simple function so that we can give cabin names based on the means.""" # applying cabin estimator function. df_all["Cabin"] = df_all.Fare.apply(lambda x: cabin_estimator(x)) percent_value_counts(df_all, "Cabin") df_all["Fare"] = pd.qcut(df_all["Fare"], 13) fig, axs = plt.subplots(figsize=(22, 9)) sns.countplot(x="Fare", hue="Survived", data=df_all) plt.xlabel("Fare", size=15, labelpad=20) plt.ylabel("Passenger Count", size=15, labelpad=20) plt.tick_params(axis="x", labelsize=10) plt.tick_params(axis="y", labelsize=15) plt.legend(["Not Survived", "Survived"], loc="upper right", prop={"size": 15}) plt.title("Count of Survival in {} Feature".format("Fare"), size=15, y=1.05) plt.show() df_train["Embarked"].value_counts() df_train[df_train.Embarked.isnull()] """ We may be able to solve these two missing values by looking at other independent variables of the two raws. Both passengers paid a fare of $80, are of Pclass 1 and female Sex. the average fare closest to $80 are in the C Embarked values where pclass is 1. 
So, let's fill in the missing values as "C" """ df_train["Embarked"].fillna("C", inplace=True) df_train["Embarked"].value_counts() df_train[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean() pd.crosstab( [df_train.Embarked, df_train.Pclass], [df_train.Sex, df_train.Survived], margins=True, ).style.background_gradient(cmap="summer_r") df_train.head(3) output = pd.DataFrame( {"PassengerId": df_train.PassengerId, "Survived": df_train.Survived} ) output.to_csv("submission.csv", index=False)
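The cabin_estimator if/elif ladder above maps a fare to a cabin letter using thresholds read off the per-cabin fare means. The same mapping can be written as a single vectorised pd.cut call; the sketch below reuses the notebook's thresholds and letters but is only an illustrative alternative, not a change the author made.

```python
import pandas as pd

# The notebook's fare thresholds and the cabin letter assigned to each band
bins = [-float("inf"), 16, 27, 38, 47, 53, 54, 116, float("inf")]
labels = ["G", "F", "T", "A", "E", "D", "C", "B"]

fares = pd.Series([7.25, 30.0, 52.0, 80.0, 263.0], name="Fare")  # illustrative values

# right=False makes each interval [lower, upper), matching the if/elif comparisons
cabins = pd.cut(fares, bins=bins, labels=labels, right=False)
print(pd.concat([fares, cabins.rename("Cabin")], axis=1))
```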
false
0
2,800
0
2,800
2,800
129019356
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd pd.set_option("max_columns", None) pd.set_option("max_rows", 90) import matplotlib.pyplot as plt import seaborn as sns sns.set_style("darkgrid") from sklearn.neighbors import KNeighborsRegressor import scipy.stats from sklearn.preprocessing import StandardScaler from pycaret.regression import setup, compare_models from sklearn.model_selection import KFold, cross_val_score from catboost import CatBoostRegressor from sklearn.linear_model import ( BayesianRidge, HuberRegressor, Ridge, OrthogonalMatchingPursuit, ) from lightgbm import LGBMRegressor from sklearn.ensemble import GradientBoostingRegressor from xgboost import XGBRegressor import optuna train0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv") test0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv") sample_submission = pd.read_csv( "../input/house-prices-advanced-regression-techniques/sample_submission.csv" ) train0 test0 sample_submission target = train0["SalePrice"] test_ids = test0["Id"] train1 = train0.drop(["Id", "SalePrice"], axis=1) test1 = test0.drop("Id", axis=1) data1 = pd.concat([train1, test1], axis=0).reset_index(drop=True) data1 target # ## Cleaning data2 = data1.copy() data2["MSSubClass"] = data2["MSSubClass"].astype(str) # Impute using a constant value for column in [ "Alley", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "FireplaceQu", "GarageType", "GarageFinish", "GarageQual", "GarageCond", "PoolQC", "Fence", "MiscFeature", ]: data2[column] = data2[column].fillna("None") # Impute using the column mode for column in [ "MSZoning", "Utilities", "Exterior1st", "Exterior2nd", "MasVnrType", "Electrical", "KitchenQual", "Functional", "SaleType", ]: data2[column] = data2[column].fillna(data2[column].mode()[0]) data3 = data2.copy() import pandas as pd data = { "Name": ["John", "Mary", "Peter", "Sarah"], "Age": [25, 30, 35, 40], "Gender": ["M", "F", "M", "F"], "Salary": [50000, 60000, 70000, 80000], } df = pd.DataFrame(data) # select row with index 2 and all columns row2 = df.loc[2, :] # select all rows and columns 'Name' and 'Gender' subset = df.loc[:, ["Name", "Gender"]] subset # select first two rows and first three columns subset = df.iloc[:2, :3] subset # select last row and last column cell = df.iloc[-1, -1] cell
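The two imputation loops above (a constant "None" for features where missingness is structural, the column mode for the rest) can also be expressed with scikit-learn's SimpleImputer inside a ColumnTransformer, which keeps the rules bundled for reuse at prediction time. The sketch below uses a tiny made-up frame and shortened column lists purely for illustration; it is not part of the original notebook.

```python
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer

# Tiny frame standing in for the concatenated train/test data (values are made up)
data = pd.DataFrame({
    "Alley":       [np.nan, "Grvl", np.nan],
    "PoolQC":      [np.nan, np.nan, "Gd"],
    "MSZoning":    ["RL", np.nan, "RM"],
    "KitchenQual": [np.nan, "TA", "Gd"],
})

constant_cols = ["Alley", "PoolQC"]           # notebook fills these with the string "None"
mode_cols = ["MSZoning", "KitchenQual"]       # notebook fills these with the column mode

imputer = ColumnTransformer([
    ("as_none", SimpleImputer(strategy="constant", fill_value="None"), constant_cols),
    ("as_mode", SimpleImputer(strategy="most_frequent"), mode_cols),
])

filled = pd.DataFrame(imputer.fit_transform(data), columns=constant_cols + mode_cols)
print(filled)
```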
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019356.ipynb
null
null
[{"Id": 129019356, "ScriptId": 38308563, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12619548, "CreationDate": "05/10/2023 11:01:07", "VersionNumber": 2.0, "Title": "Hse_Predict", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 104.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 29.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd pd.set_option("max_columns", None) pd.set_option("max_rows", 90) import matplotlib.pyplot as plt import seaborn as sns sns.set_style("darkgrid") from sklearn.neighbors import KNeighborsRegressor import scipy.stats from sklearn.preprocessing import StandardScaler from pycaret.regression import setup, compare_models from sklearn.model_selection import KFold, cross_val_score from catboost import CatBoostRegressor from sklearn.linear_model import ( BayesianRidge, HuberRegressor, Ridge, OrthogonalMatchingPursuit, ) from lightgbm import LGBMRegressor from sklearn.ensemble import GradientBoostingRegressor from xgboost import XGBRegressor import optuna train0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv") test0 = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv") sample_submission = pd.read_csv( "../input/house-prices-advanced-regression-techniques/sample_submission.csv" ) train0 test0 sample_submission target = train0["SalePrice"] test_ids = test0["Id"] train1 = train0.drop(["Id", "SalePrice"], axis=1) test1 = test0.drop("Id", axis=1) data1 = pd.concat([train1, test1], axis=0).reset_index(drop=True) data1 target # ## Cleaning data2 = data1.copy() data2["MSSubClass"] = data2["MSSubClass"].astype(str) # Impute using a constant value for column in [ "Alley", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "FireplaceQu", "GarageType", "GarageFinish", "GarageQual", "GarageCond", "PoolQC", "Fence", "MiscFeature", ]: data2[column] = data2[column].fillna("None") # Impute using the column mode for column in [ "MSZoning", "Utilities", "Exterior1st", "Exterior2nd", "MasVnrType", "Electrical", "KitchenQual", "Functional", "SaleType", ]: data2[column] = data2[column].fillna(data2[column].mode()[0]) data3 = data2.copy() import pandas as pd data = { "Name": ["John", "Mary", "Peter", "Sarah"], "Age": [25, 30, 35, 40], "Gender": ["M", "F", "M", "F"], "Salary": [50000, 60000, 70000, 80000], } df = pd.DataFrame(data) # select row with index 2 and all columns row2 = df.loc[2, :] # select all rows and columns 'Name' and 'Gender' subset = df.loc[:, ["Name", "Gender"]] subset # select first two rows and first three columns subset = df.iloc[:2, :3] subset # select last row and last column cell = df.iloc[-1, -1] cell
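The notebook concatenates train1 and test1 into one frame for shared preprocessing but stops before splitting it back. A common continuation, sketched below with made-up stand-in frames, is to split by the original training row count and to model log1p(SalePrice), mapping predictions back with expm1. This is a hedged illustration of that pattern, not code from the original notebook.

```python
import numpy as np
import pandas as pd

# Stand-ins for the notebook's train1/test1 frames (tiny, made-up values)
train1 = pd.DataFrame({"GrLivArea": [1500, 2000, 900]})
test1 = pd.DataFrame({"GrLivArea": [1200, 1750]})
target = pd.Series([200000, 250000, 120000], name="SalePrice")

data1 = pd.concat([train1, test1], axis=0).reset_index(drop=True)

# ... shared preprocessing on data1 would happen here ...

# Split the processed frame back by the original row counts
n_train = len(train1)
X_train = data1.iloc[:n_train]
X_test = data1.iloc[n_train:]

# SalePrice is right-skewed, so models are often fit on log1p(target)
# and predictions are mapped back with np.expm1
y_log = np.log1p(target)
print(X_train.shape, X_test.shape, y_log.round(3).tolist())
```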
false
0
1,001
0
1,001
1,001
129019808
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt import seaborn as sns sns.set() df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") df.info() df.head() # # Mean & Standard Deviation mean = np.mean(df["Result"]) std = np.std(df["Result"]) print("Mean: ", mean) print("Standard Deviation: ", std) # # **Confidence Interval** import numpy as np from scipy.stats import t # Calculate the mean and standard deviation of the dataset # Set the level of confidence (1 - alpha) conf_level = 0.95 # Calculate the sample size and degrees of freedom n = len(df["Result"]) df = n - 1 # Calculate the t-statistic t_stat = t.ppf((1 + conf_level) / 2, df) # Calculate the margin of error margin_of_error = t_stat * std / np.sqrt(n) # Calculate the confidence interval lower = mean - margin_of_error upper = mean + margin_of_error # Print the confidence interval print( "The 95% confidence interval for the mean is: ({:.2f}, {:.2f})".format(lower, upper) ) # # **Hypothesis Testing** import pandas as pd import numpy as np from scipy.stats import ttest_1samp df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # Set the null hypothesis mean null_mean = 0 # Set the level of significance (alpha) alpha = 0.05 # Perform the t-test t_stat, p_val = ttest_1samp(df["Result"], null_mean) # Print the results print("t-statistic: {:.2f}".format(t_stat)) print("p-value: {:.2f}".format(p_val)) # Check if the result is statistically significant if p_val < alpha: print("The result is statistically significant") else: print("The result is not statistically significant") import pandas as pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({"F": 1, "M": 0}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("float64") df.head() # # **Regression** # import pandas as pd import statsmodels.api as sm # Split the data into X and y X = df["Result"] y = df["Gender"] # Add a constant to the X data X = sm.add_constant(X) # Create the linear regression model model = sm.OLS(y, X).fit() # Print the model summary print(model.summary()) # # Anova for Result import pandas as pd from scipy import stats # Remove rows with missing data df.dropna(inplace=True) # Separate the data by gender bigResult = df[df["Result"] >= 4.0].iloc[:, 1:] smallResult = df[df["Result"] < 4.0].iloc[:, 1:] # Check for empty groups and remove them if len(bigResult) == 0 or len(smallResult) == 0: print("Error: one or more groups has no data.") else: # Perform ANOVA using the f_oneway() function f_val, p_val = stats.f_oneway(bigResult, smallResult) # Print the F-value and p-value to the console print("F-value:", f_val) print("p-value:", p_val) # # **Anova for Gender** import pandas as 
pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({"F": 1, "M": 0}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("float64") df.head() import pandas as pd from scipy import stats # Remove rows with missing data df.dropna(inplace=True) # Separate the data by gender maleData = df[df["Gender"] == 0.0].iloc[:, 1:] femaleData = df[df["Gender"] == 1.0].iloc[:, 1:] # Check for empty groups and remove them if len(maleData) == 0 or len(femaleData) == 0: print("Error: one or more groups has no data.") else: # Perform ANOVA using the f_oneway() function f_val, p_val = stats.f_oneway(maleData, femaleData) # Print the F-value and p-value to the console print("F-value:", f_val) print("p-value:", p_val) # # **Graphs** data = df["Result"].values # Create a histogram plot with matplotlib plt.hist(data, bins=9) # the number of bins can be adjusted as needed # Add titles and labels to the plot plt.title("Histogram of Reesults") plt.xlabel("Data Values") plt.ylabel("Frequency") # Show the plot plt.show() data = df["Result"] # Count the number of occurrences of each value in the column counts = data.value_counts() # Create a bar plot with matplotlib counts.plot(kind="bar") # Add titles and labels to the plot plt.title("Bar Plot of Results") plt.xlabel("Data Values") plt.ylabel("Counts") # Show the plot plt.show() data = df["Result"] # Count the number of occurrences of each value in the column counts = data.value_counts() # Create a pie chart with matplotlib counts.plot(kind="pie") # Add titles and labels to the plot plt.title("Pie Chart of Results") plt.legend(labels=counts.index, loc="upper right") # Show the plot plt.show() import pandas as pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({1: "F", 0: "M"}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("object") df.head() # Create a box plot using seaborn sns.boxplot(x="Gender", y="Result", data=df) # Create a violin plot using seaborn sns.violinplot(x="Gender", y="Result", data=df) # Show the plots plt.show()
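One caveat in the confidence-interval cell above: `df = n - 1` rebinds the name of the DataFrame to an integer, which only goes unnoticed because the frame is reloaded in the next section. The sketch below keeps the degrees of freedom in its own variable, uses the sample standard deviation (ddof=1, the usual convention with a t interval), and lets scipy compute the bounds; the sample values are made up and stand in for df["Result"].

```python
import numpy as np
from scipy import stats

# Illustrative sample standing in for df["Result"]
result = np.array([3.5, 4.0, 4.2, 3.8, 4.5, 3.9, 4.1])

n = len(result)
dof = n - 1                               # degrees of freedom kept separate from the DataFrame
mean = result.mean()
sem = result.std(ddof=1) / np.sqrt(n)     # standard error using the sample std

lower, upper = stats.t.interval(0.95, dof, loc=mean, scale=sem)
print(f"95% CI for the mean: ({lower:.2f}, {upper:.2f})")
```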
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019808.ipynb
null
null
[{"Id": 129019808, "ScriptId": 38206276, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13154053, "CreationDate": "05/10/2023 11:05:31", "VersionNumber": 1.0, "Title": "statistical analysis", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 250.0, "LinesInsertedFromPrevious": 250.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt import seaborn as sns sns.set() df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") df.info() df.head() # # Mean & Standard Deviation mean = np.mean(df["Result"]) std = np.std(df["Result"]) print("Mean: ", mean) print("Standard Deviation: ", std) # # **Confidence Interval** import numpy as np from scipy.stats import t # Calculate the mean and standard deviation of the dataset # Set the level of confidence (1 - alpha) conf_level = 0.95 # Calculate the sample size and degrees of freedom n = len(df["Result"]) df = n - 1 # Calculate the t-statistic t_stat = t.ppf((1 + conf_level) / 2, df) # Calculate the margin of error margin_of_error = t_stat * std / np.sqrt(n) # Calculate the confidence interval lower = mean - margin_of_error upper = mean + margin_of_error # Print the confidence interval print( "The 95% confidence interval for the mean is: ({:.2f}, {:.2f})".format(lower, upper) ) # # **Hypothesis Testing** import pandas as pd import numpy as np from scipy.stats import ttest_1samp df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # Set the null hypothesis mean null_mean = 0 # Set the level of significance (alpha) alpha = 0.05 # Perform the t-test t_stat, p_val = ttest_1samp(df["Result"], null_mean) # Print the results print("t-statistic: {:.2f}".format(t_stat)) print("p-value: {:.2f}".format(p_val)) # Check if the result is statistically significant if p_val < alpha: print("The result is statistically significant") else: print("The result is not statistically significant") import pandas as pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({"F": 1, "M": 0}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("float64") df.head() # # **Regression** # import pandas as pd import statsmodels.api as sm # Split the data into X and y X = df["Result"] y = df["Gender"] # Add a constant to the X data X = sm.add_constant(X) # Create the linear regression model model = sm.OLS(y, X).fit() # Print the model summary print(model.summary()) # # Anova for Result import pandas as pd from scipy import stats # Remove rows with missing data df.dropna(inplace=True) # Separate the data by gender bigResult = df[df["Result"] >= 4.0].iloc[:, 1:] smallResult = df[df["Result"] < 4.0].iloc[:, 1:] # Check for empty groups and remove them if len(bigResult) == 0 or len(smallResult) == 0: print("Error: one or more groups has no data.") else: # Perform ANOVA using the f_oneway() function f_val, p_val = stats.f_oneway(bigResult, smallResult) # Print the F-value and p-value to the console print("F-value:", f_val) print("p-value:", p_val) # # **Anova for Gender** import pandas as 
pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({"F": 1, "M": 0}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("float64") df.head() import pandas as pd from scipy import stats # Remove rows with missing data df.dropna(inplace=True) # Separate the data by gender maleData = df[df["Gender"] == 0.0].iloc[:, 1:] femaleData = df[df["Gender"] == 1.0].iloc[:, 1:] # Check for empty groups and remove them if len(maleData) == 0 or len(femaleData) == 0: print("Error: one or more groups has no data.") else: # Perform ANOVA using the f_oneway() function f_val, p_val = stats.f_oneway(maleData, femaleData) # Print the F-value and p-value to the console print("F-value:", f_val) print("p-value:", p_val) # # **Graphs** data = df["Result"].values # Create a histogram plot with matplotlib plt.hist(data, bins=9) # the number of bins can be adjusted as needed # Add titles and labels to the plot plt.title("Histogram of Reesults") plt.xlabel("Data Values") plt.ylabel("Frequency") # Show the plot plt.show() data = df["Result"] # Count the number of occurrences of each value in the column counts = data.value_counts() # Create a bar plot with matplotlib counts.plot(kind="bar") # Add titles and labels to the plot plt.title("Bar Plot of Results") plt.xlabel("Data Values") plt.ylabel("Counts") # Show the plot plt.show() data = df["Result"] # Count the number of occurrences of each value in the column counts = data.value_counts() # Create a pie chart with matplotlib counts.plot(kind="pie") # Add titles and labels to the plot plt.title("Pie Chart of Results") plt.legend(labels=counts.index, loc="upper right") # Show the plot plt.show() import pandas as pd # your code for loading data df = pd.read_excel("/kaggle/input/iedata/iedatason.xlsx", sheet_name="Form Yanıtları 1") # replace F with 1 and M with 0 in Gender column df["Gender"] = df["Gender"].replace({1: "F", 0: "M"}) # convert Gender column to float64 df["Gender"] = df["Gender"].astype("object") df.head() # Create a box plot using seaborn sns.boxplot(x="Gender", y="Result", data=df) # Create a violin plot using seaborn sns.violinplot(x="Gender", y="Result", data=df) # Show the plots plt.show()
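The ANOVA cells above pass multi-column slices (iloc[:, 1:]) to f_oneway, which returns one F statistic per column. If the question is specifically whether Result differs by gender, comparing just that column is clearer, and with two groups the one-way ANOVA is equivalent to a pooled two-sample t-test. A small sketch with made-up survey rows (not the original Excel data):

```python
import pandas as pd
from scipy import stats

# Toy data standing in for the survey frame (Gender already encoded 0/1)
df = pd.DataFrame({
    "Gender": [0, 0, 0, 1, 1, 1, 1],
    "Result": [3.8, 4.1, 3.5, 4.4, 4.0, 4.6, 4.2],
})

male = df.loc[df["Gender"] == 0, "Result"]
female = df.loc[df["Gender"] == 1, "Result"]

# With two groups, a one-way ANOVA on a single column matches the pooled t-test
f_val, p_anova = stats.f_oneway(male, female)
t_val, p_ttest = stats.ttest_ind(male, female)

print(f"ANOVA:  F = {f_val:.3f}, p = {p_anova:.3f}")
print(f"t-test: t = {t_val:.3f}, p = {p_ttest:.3f}")  # p-values agree (t**2 == F)
```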
false
0
1,896
0
1,896
1,896
129019183
<jupyter_start><jupyter_text>Brain Tumor Classification (MRI) # Contribute to OpenSource ##Repo: [GitHub](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms) ## Read Me: [Link](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms/tree/master#contributing-to-the-project) # Abstract A Brain tumor is considered as one of the aggressive diseases, among children and adults. Brain tumors account for 85 to 90 percent of all primary Central Nervous System(CNS) tumors. Every year, around 11,700 people are diagnosed with a brain tumor. The 5-year survival rate for people with a cancerous brain or CNS tumor is approximately 34 percent for men and36 percent for women. Brain Tumors are classified as: Benign Tumor, Malignant Tumor, Pituitary Tumor, etc. Proper treatment, planning, and accurate diagnostics should be implemented to improve the life expectancy of the patients. The best technique to detect brain tumors is Magnetic Resonance Imaging (MRI). A huge amount of image data is generated through the scans. These images are examined by the radiologist. A manual examination can be error-prone due to the level of complexities involved in brain tumors and their properties. Application of automated classification techniques using Machine Learning(ML) and Artificial Intelligence(AI)has consistently shown higher accuracy than manual classification. Hence, proposing a system performing detection and classification by using Deep Learning Algorithms using ConvolutionNeural Network (CNN), Artificial Neural Network (ANN), and TransferLearning (TL) would be helpful to doctors all around the world. ### Context Brain Tumors are complex. There are a lot of abnormalities in the sizes and location of the brain tumor(s). This makes it really difficult for complete understanding of the nature of the tumor. Also, a professional Neurosurgeon is required for MRI analysis. Often times in developing countries the lack of skillful doctors and lack of knowledge about tumors makes it really challenging and time-consuming to generate reports from MRI’. So an automated system on Cloud can solve this problem. ### Definition To Detect and Classify Brain Tumor using, CNN and TL; as an asset of Deep Learning and to examine the tumor position(segmentation). 
Kaggle dataset identifier: brain-tumor-classification-mri <jupyter_script>import subprocess whls = [ "/kaggle/input/pyg-cp37-pt111/torch_cluster-1.6.0-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_scatter-2.1.0-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_sparse-0.6.16-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_spline_conv-1.2.1-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_geometric-2.2.0-py3-none-any.whl", "/kaggle/input/pyg-cp37-pt111/ruamel.yaml-0.17.21-py3-none-any.whl", ] for w in whls: print("Installing", w) subprocess.call(["pip", "install", w, "--no-deps", "--upgrade"]) import torch import torch.nn as nn import torchvision import torch.utils.data as data_utils import torch_geometric from types import MethodType import matplotlib.pyplot as plt import tqdm from IPython import display dtype = torch.float16 device = torch.device("cuda:0") def train(model: nn.Module, num_epochs, train_loader, validate_loader, lr): model.type(dtype) model.to(device) model.train() optimizer = torch.optim.SGD(model.parameters(), lr) loss = nn.CrossEntropyLoss() lrc = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=num_epochs, eta_min=lr / 10 ) loss_array = [] acc_array = [] for eopch in tqdm.tqdm(range(num_epochs)): running_loss, c = 0, 0 for inputs, target in train_loader: inputs = inputs.type(dtype) inputs = inputs.to(device) target = target.to(device) y_pre = model(inputs) l = loss(y_pre, target) optimizer.zero_grad() l.backward() optimizer.step() running_loss += l c += 1 loss_array.append(running_loss.item() / c) acc_array.append(test(model, validate_loader)) display.clear_output(True) plt.xlim([0, num_epochs - 1]) plt.plot(loss_array) plt.plot(acc_array) plt.show() print(f"current loss{loss_array[-1]} current acc{acc_array[-1]}") print(f"best acc: {max(acc_array)}") @torch.no_grad() def test(model, validate_loader): correct = 0 total = 0 model.eval() model.to(device) model.type(dtype) with torch.no_grad(): for data in validate_loader: image, lables = data outputs = model(image.type(dtype).to(device)) _, predicted = torch.max(outputs.data, dim=1) total += lables.size(0) correct += (predicted == lables.to(device)).sum().item() acc = correct / total return acc transforms = torchvision.transforms data_transform = { "train": transforms.Compose( [ transforms.RandomResizedCrop(224), transforms.Resize((224, 224)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ), "val": transforms.Compose( [ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ), } def train_flowers(model, epochs, batch_size=32, lr=0.001): train_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/flowerss/train", transform=data_transform["train"] ) train_loader = data_utils.DataLoader( train_dataset, batch_size=batch_size, shuffle=True ) validate_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/flowerss/val", transform=data_transform["val"] ) validate_loader = data_utils.DataLoader( validate_dataset, batch_size=batch_size, shuffle=False ) train(model, epochs, train_loader, validate_loader, lr) test(model, validate_loader) def train_tumor(model, epochs, batch_size=32, lr=0.001): train_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/brain-tumor-classification-mri/Training", transform=data_transform["train"], ) train_loader = data_utils.DataLoader( train_dataset, batch_size=batch_size, 
shuffle=True ) validate_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/brain-tumor-classification-mri/Testing", transform=data_transform["val"], ) validate_loader = data_utils.DataLoader( validate_dataset, batch_size=batch_size, shuffle=False ) train(model, epochs, train_loader, validate_loader, lr) def _forward_impl(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x class ResGCN2(nn.Module): def __init__(self, k, num_classes): super().__init__() self.k = k self.num_classes = num_classes self.resnet = torchvision.models.resnet18(True) self.resnet._forward_impl = MethodType(_forward_impl, self.resnet) self.conv1 = torch_geometric.nn.GCNConv(512, 256) self.dropout = nn.Dropout(0.5) self.conv2 = torch_geometric.nn.GCNConv(256, num_classes) def forward_features(self, X): # B * 512 * 7 * 7 return self.resnet(X) def convert_graph(self, x): b, c, h, w = x.shape device = x.device k = self.k x = torch.permute(x, (0, 2, 3, 1)).reshape((b, h * w, c)) y = torch.cdist(x, x, 2) # [b, hw, hw] _, idx = torch.topk(y, k, -1) source = torch.arange(h * w, device=device).repeat_interleave(k).repeat(b) target = torch.flatten(idx) step = torch.arange(b, device=device).repeat_interleave(h * w * k) * h * w adj = torch.row_stack([source, target]) + step return x.reshape((b * h * w, c)), adj.long() def froward_gcn(self, X, adj): X = self.conv1(X, adj) X = self.dropout(X) X = self.conv2(X, adj) # (B*7*7, num_classes) return X def forward(self, X): batch = X.shape[0] X = self.forward_features(X) X, adj = self.convert_graph(X) X = self.froward_gcn(X, adj) X = torch.reshape(X, (batch, -1, self.num_classes)) X = torch.mean(X, 1) # (B, num_classes) return X model = ResGCN2(k=8, num_classes=5) train_flowers(model, 20, batch_size=32, lr=0.001)
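convert_graph above builds a k-nearest-neighbour graph over the 7x7 ResNet feature grid, with an index offset so the per-image graphs stay disjoint inside a batch. One detail worth flagging: torch.topk is called with its default largest=True on a distance matrix, so it actually selects the k most distant patches; if nearest neighbours are intended, largest=False is needed. The standalone sketch below mirrors the construction on a tiny random tensor so the shapes and offsets can be inspected (largest=False is used here; random features, not real MRI data).

```python
import torch

def knn_adjacency(x, k):
    """Mirror of the notebook's convert_graph: per-image kNN over the spatial grid,
    offset so graphs from different batch items do not share node indices."""
    b, c, h, w = x.shape
    x = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
    dist = torch.cdist(x, x, p=2)                         # [b, hw, hw] pairwise distances
    _, idx = torch.topk(dist, k, dim=-1, largest=False)   # k nearest (each patch includes itself)
    source = torch.arange(h * w).repeat_interleave(k).repeat(b)
    target = idx.flatten()
    offset = torch.arange(b).repeat_interleave(h * w * k) * h * w
    return torch.stack([source, target]) + offset

feat = torch.randn(2, 4, 3, 3)   # pretend backbone features: batch=2, C=4, 3x3 grid
adj = knn_adjacency(feat, k=2)
print(adj.shape)                 # torch.Size([2, 36]): 2 images * 9 nodes * 2 edges
print(adj[:, :4])                # first few (source, target) pairs of image 0
```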
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019183.ipynb
brain-tumor-classification-mri
sartajbhuvaji
[{"Id": 129019183, "ScriptId": 38352626, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12160881, "CreationDate": "05/10/2023 10:59:41", "VersionNumber": 1.0, "Title": "Fork of Resnet+GCN\u56fe\u7247\u5206\u7c7b", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 168.0, "LinesInsertedFromPrevious": 79.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 89.0, "LinesInsertedFromFork": 79.0, "LinesDeletedFromFork": 158.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 89.0, "TotalVotes": 0}]
[{"Id": 184705588, "KernelVersionId": 129019183, "SourceDatasetVersionId": 1183165}, {"Id": 184705590, "KernelVersionId": 129019183, "SourceDatasetVersionId": 5610519}, {"Id": 184705589, "KernelVersionId": 129019183, "SourceDatasetVersionId": 5226033}]
[{"Id": 1183165, "DatasetId": 672377, "DatasourceVersionId": 1214258, "CreatorUserId": 3469060, "LicenseName": "CC0: Public Domain", "CreationDate": "05/24/2020 16:24:55", "VersionNumber": 2.0, "Title": "Brain Tumor Classification (MRI)", "Slug": "brain-tumor-classification-mri", "Subtitle": "Classify MRI images into four classes", "Description": "# Contribute to OpenSource\n##Repo: [GitHub](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms)\n## Read Me: [Link](https://github.com/SartajBhuvaji/Brain-Tumor-Classification-Using-Deep-Learning-Algorithms/tree/master#contributing-to-the-project)\n\n\n# Abstract\nA Brain tumor is considered as one of the aggressive diseases, among children and adults. Brain tumors account for 85 to 90 percent of all primary Central Nervous System(CNS) tumors. Every year, around 11,700 people are diagnosed with a brain tumor. The 5-year survival rate for people with a cancerous brain or CNS tumor is approximately 34 percent for men and36 percent for women. Brain Tumors are classified as: Benign Tumor, Malignant Tumor, Pituitary Tumor, etc. Proper treatment, planning, and accurate diagnostics should be implemented to improve the life expectancy of the patients. The best technique to detect brain tumors is Magnetic Resonance Imaging (MRI). A huge amount of image data is generated through the scans. These images are examined by the radiologist. A manual examination can be error-prone due to the level of complexities involved in brain tumors and their properties.\n\nApplication of automated classification techniques using Machine Learning(ML) and Artificial Intelligence(AI)has consistently shown higher accuracy than manual classification. Hence, proposing a system performing detection and classification by using Deep Learning Algorithms using ConvolutionNeural Network (CNN), Artificial Neural Network (ANN), and TransferLearning (TL) would be helpful to doctors all around the world.\n\n### Context\n\nBrain Tumors are complex. There are a lot of abnormalities in the sizes and location of the brain tumor(s). This makes it really difficult for complete understanding of the nature of the tumor. Also, a professional Neurosurgeon is required for MRI analysis. Often times in developing countries the lack of skillful doctors and lack of knowledge about tumors makes it really challenging and time-consuming to generate reports from MRI\u2019. So an automated system on Cloud can solve this problem.\n\n\n### Definition\n\nTo Detect and Classify Brain Tumor using, CNN and TL; as an asset of Deep Learning and to examine the tumor position(segmentation).\n\n\n### Acknowledgements for Dataset.\n\nNavoneel Chakrabarty\nSwati Kanchan\n\n### Team\n\nSartaj Bhuvaji\nAnkita Kadam\nPrajakta Bhumkar\nSameer Dedge", "VersionNotes": "Automatic Update 2020-05-24", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 672377, "CreatorUserId": 3469060, "OwnerUserId": 3469060.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1183165.0, "CurrentDatasourceVersionId": 1214258.0, "ForumId": 686859, "Type": 2, "CreationDate": "05/24/2020 16:22:54", "LastActivityDate": "05/24/2020", "TotalViews": 302511, "TotalDownloads": 32508, "TotalVotes": 481, "TotalKernels": 255}]
[{"Id": 3469060, "UserName": "sartajbhuvaji", "DisplayName": "Sartaj", "RegisterDate": "07/16/2019", "PerformanceTier": 0}]
import subprocess whls = [ "/kaggle/input/pyg-cp37-pt111/torch_cluster-1.6.0-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_scatter-2.1.0-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_sparse-0.6.16-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_spline_conv-1.2.1-cp37-cp37m-linux_x86_64.whl", "/kaggle/input/pyg-cp37-pt111/torch_geometric-2.2.0-py3-none-any.whl", "/kaggle/input/pyg-cp37-pt111/ruamel.yaml-0.17.21-py3-none-any.whl", ] for w in whls: print("Installing", w) subprocess.call(["pip", "install", w, "--no-deps", "--upgrade"]) import torch import torch.nn as nn import torchvision import torch.utils.data as data_utils import torch_geometric from types import MethodType import matplotlib.pyplot as plt import tqdm from IPython import display dtype = torch.float16 device = torch.device("cuda:0") def train(model: nn.Module, num_epochs, train_loader, validate_loader, lr): model.type(dtype) model.to(device) model.train() optimizer = torch.optim.SGD(model.parameters(), lr) loss = nn.CrossEntropyLoss() lrc = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=num_epochs, eta_min=lr / 10 ) loss_array = [] acc_array = [] for eopch in tqdm.tqdm(range(num_epochs)): running_loss, c = 0, 0 for inputs, target in train_loader: inputs = inputs.type(dtype) inputs = inputs.to(device) target = target.to(device) y_pre = model(inputs) l = loss(y_pre, target) optimizer.zero_grad() l.backward() optimizer.step() running_loss += l c += 1 loss_array.append(running_loss.item() / c) acc_array.append(test(model, validate_loader)) display.clear_output(True) plt.xlim([0, num_epochs - 1]) plt.plot(loss_array) plt.plot(acc_array) plt.show() print(f"current loss{loss_array[-1]} current acc{acc_array[-1]}") print(f"best acc: {max(acc_array)}") @torch.no_grad() def test(model, validate_loader): correct = 0 total = 0 model.eval() model.to(device) model.type(dtype) with torch.no_grad(): for data in validate_loader: image, lables = data outputs = model(image.type(dtype).to(device)) _, predicted = torch.max(outputs.data, dim=1) total += lables.size(0) correct += (predicted == lables.to(device)).sum().item() acc = correct / total return acc transforms = torchvision.transforms data_transform = { "train": transforms.Compose( [ transforms.RandomResizedCrop(224), transforms.Resize((224, 224)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ), "val": transforms.Compose( [ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] ), } def train_flowers(model, epochs, batch_size=32, lr=0.001): train_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/flowerss/train", transform=data_transform["train"] ) train_loader = data_utils.DataLoader( train_dataset, batch_size=batch_size, shuffle=True ) validate_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/flowerss/val", transform=data_transform["val"] ) validate_loader = data_utils.DataLoader( validate_dataset, batch_size=batch_size, shuffle=False ) train(model, epochs, train_loader, validate_loader, lr) test(model, validate_loader) def train_tumor(model, epochs, batch_size=32, lr=0.001): train_dataset = torchvision.datasets.ImageFolder( "/kaggle/input/brain-tumor-classification-mri/Training", transform=data_transform["train"], ) train_loader = data_utils.DataLoader( train_dataset, batch_size=batch_size, shuffle=True ) validate_dataset = torchvision.datasets.ImageFolder( 
"/kaggle/input/brain-tumor-classification-mri/Testing", transform=data_transform["val"], ) validate_loader = data_utils.DataLoader( validate_dataset, batch_size=batch_size, shuffle=False ) train(model, epochs, train_loader, validate_loader, lr) def _forward_impl(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x class ResGCN2(nn.Module): def __init__(self, k, num_classes): super().__init__() self.k = k self.num_classes = num_classes self.resnet = torchvision.models.resnet18(True) self.resnet._forward_impl = MethodType(_forward_impl, self.resnet) self.conv1 = torch_geometric.nn.GCNConv(512, 256) self.dropout = nn.Dropout(0.5) self.conv2 = torch_geometric.nn.GCNConv(256, num_classes) def forward_features(self, X): # B * 512 * 7 * 7 return self.resnet(X) def convert_graph(self, x): b, c, h, w = x.shape device = x.device k = self.k x = torch.permute(x, (0, 2, 3, 1)).reshape((b, h * w, c)) y = torch.cdist(x, x, 2) # [b, hw, hw] _, idx = torch.topk(y, k, -1) source = torch.arange(h * w, device=device).repeat_interleave(k).repeat(b) target = torch.flatten(idx) step = torch.arange(b, device=device).repeat_interleave(h * w * k) * h * w adj = torch.row_stack([source, target]) + step return x.reshape((b * h * w, c)), adj.long() def froward_gcn(self, X, adj): X = self.conv1(X, adj) X = self.dropout(X) X = self.conv2(X, adj) # (B*7*7, num_classes) return X def forward(self, X): batch = X.shape[0] X = self.forward_features(X) X, adj = self.convert_graph(X) X = self.froward_gcn(X, adj) X = torch.reshape(X, (batch, -1, self.num_classes)) X = torch.mean(X, 1) # (B, num_classes) return X model = ResGCN2(k=8, num_classes=5) train_flowers(model, 20, batch_size=32, lr=0.001)
false
0
2,106
0
2,735
2,106
129019305
<jupyter_start><jupyter_text>User_Data The dataset consists of information about users who are potential customers for a product or service. It contains four input features - User ID, Gender, Age, and Estimated Salary - which are used to predict whether or not the user purchased the product, indicated by the output or target column 'Purchased'. The User ID is a unique identifier assigned to each user, while Gender is the user's gender, which can be either male or female. Age is the age of the user in years, and Estimated Salary is an estimate of the user's annual salary. The dataset is likely used for binary classification tasks to determine whether or not a user is likely to purchase a particular product or service. The features provided could potentially be used to create a model that predicts the probability of a user purchasing the product based on their age, gender, and estimated salary. Kaggle dataset identifier: user-data <jupyter_script># # Naive Bayes # #### Python Implementation of the Naïve Bayes algorithm: # - Now we will implement a Naive Bayes Algorithm using Python. So for this, we will use the "user_data" dataset, which we have used in our other classification model. Therefore we can easily compare the Naive Bayes model with the other models. # # Importing the libraries # import numpy as np import matplotlib.pyplot as mtp import pandas as pd # Importing the dataset dataset = pd.read_csv("/kaggle/input/user-data/User_Data.csv") x = dataset.iloc[:, [2, 3]].values y = dataset.iloc[:, 4].values dataset.head() # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=0 ) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) # Fitting Naive Bayes to the Training set from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(x_train, y_train) # Predicting the Test set results y_pred = classifier.predict(x_test) pd.DataFrame(y_pred, y_test).head(20) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) cm # Visualising the Training set results from matplotlib.colors import ListedColormap x_set, y_set = x_train, y_train X1, X2 = np.meshgrid( np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01), np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01), ) mtp.contourf( X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("purple", "green")), ) mtp.xlim(X1.min(), X1.max()) mtp.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): mtp.scatter( x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(("purple", "green"))(i), label=j, ) mtp.title("Naive Bayes (Training set)") mtp.xlabel("Age") mtp.ylabel("Estimated Salary") mtp.legend() mtp.show() # Visualising the Test set results from matplotlib.colors import ListedColormap x_set, y_set = x_test, y_test X1, X2 = np.meshgrid( np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01), np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01), ) mtp.contourf( X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("purple", "green")), ) mtp.xlim(X1.min(), X1.max()) mtp.ylim(X2.min(), X2.max()) 
for i, j in enumerate(np.unique(y_set)): mtp.scatter( x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(("purple", "green"))(i), label=j, ) mtp.title("Naive Bayes (Test set)") mtp.xlabel("Age") mtp.ylabel("Estimated Salary") mtp.legend() mtp.show()
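The confusion matrix above is left uninterpreted; accuracy and per-class precision/recall make the comparison with the other classifiers in this series easier to read. The sketch below reproduces that evaluation on synthetic two-feature data (make_classification stands in for the scaled Age/EstimatedSalary columns, since the User_Data file itself is not included here).

```python
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Synthetic two-feature data standing in for the scaled Age / EstimatedSalary columns
X, y = make_classification(n_samples=400, n_features=2, n_informative=2,
                           n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

clf = GaussianNB().fit(X_train, y_train)
y_pred = clf.predict(X_test)

# Summary metrics alongside the raw confusion matrix
print(confusion_matrix(y_test, y_pred))
print(f"accuracy: {accuracy_score(y_test, y_pred):.3f}")
print(classification_report(y_test, y_pred))
```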
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/019/129019305.ipynb
user-data
sandragracenelson
[{"Id": 129019305, "ScriptId": 38352314, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8803259, "CreationDate": "05/10/2023 11:00:40", "VersionNumber": 1.0, "Title": "Naive Bayes", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 80.0, "LinesInsertedFromPrevious": 80.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184705819, "KernelVersionId": 129019305, "SourceDatasetVersionId": 3960552}]
[{"Id": 3960552, "DatasetId": 2350614, "DatasourceVersionId": 4016089, "CreatorUserId": 8893519, "LicenseName": "CC0: Public Domain", "CreationDate": "07/19/2022 10:12:10", "VersionNumber": 1.0, "Title": "User_Data", "Slug": "user-data", "Subtitle": "Data about product purchased or not", "Description": "The dataset consists of information about users who are potential customers for a product or service. It contains four input features - User ID, Gender, Age, and Estimated Salary - which are used to predict whether or not the user purchased the product, indicated by the output or target column 'Purchased'.\n\nThe User ID is a unique identifier assigned to each user, while Gender is the user's gender, which can be either male or female. Age is the age of the user in years, and Estimated Salary is an estimate of the user's annual salary.\n\nThe dataset is likely used for binary classification tasks to determine whether or not a user is likely to purchase a particular product or service. The features provided could potentially be used to create a model that predicts the probability of a user purchasing the product based on their age, gender, and estimated salary.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2350614, "CreatorUserId": 8893519, "OwnerUserId": 8893519.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3960552.0, "CurrentDatasourceVersionId": 4016089.0, "ForumId": 2377623, "Type": 2, "CreationDate": "07/19/2022 10:12:10", "LastActivityDate": "07/19/2022", "TotalViews": 5428, "TotalDownloads": 3239, "TotalVotes": 30, "TotalKernels": 11}]
[{"Id": 8893519, "UserName": "sandragracenelson", "DisplayName": "Sandra Grace Nelson", "RegisterDate": "11/15/2021", "PerformanceTier": 3}]
# # Naive Bayes # #### Python Implementation of the Naïve Bayes algorithm: # - Now we will implement a Naive Bayes Algorithm using Python. So for this, we will use the "user_data" dataset, which we have used in our other classification model. Therefore we can easily compare the Naive Bayes model with the other models. # # Importing the libraries # import numpy as np import matplotlib.pyplot as mtp import pandas as pd # Importing the dataset dataset = pd.read_csv("/kaggle/input/user-data/User_Data.csv") x = dataset.iloc[:, [2, 3]].values y = dataset.iloc[:, 4].values dataset.head() # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=0 ) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) # Fitting Naive Bayes to the Training set from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(x_train, y_train) # Predicting the Test set results y_pred = classifier.predict(x_test) pd.DataFrame(y_pred, y_test).head(20) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) cm # Visualising the Training set results from matplotlib.colors import ListedColormap x_set, y_set = x_train, y_train X1, X2 = np.meshgrid( np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01), np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01), ) mtp.contourf( X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("purple", "green")), ) mtp.xlim(X1.min(), X1.max()) mtp.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): mtp.scatter( x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(("purple", "green"))(i), label=j, ) mtp.title("Naive Bayes (Training set)") mtp.xlabel("Age") mtp.ylabel("Estimated Salary") mtp.legend() mtp.show() # Visualising the Test set results from matplotlib.colors import ListedColormap x_set, y_set = x_test, y_test X1, X2 = np.meshgrid( np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01), np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01), ) mtp.contourf( X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("purple", "green")), ) mtp.xlim(X1.min(), X1.max()) mtp.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): mtp.scatter( x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(("purple", "green"))(i), label=j, ) mtp.title("Naive Bayes (test set)") mtp.xlabel("Age") mtp.ylabel("Estimated Salary") mtp.legend() mtp.show()
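Because the scaler is fitted separately from the classifier above, any later reuse of the model has to remember to apply the same transform. A pipeline bundles the two steps so scaling is applied automatically at fit, predict, and cross-validation time. The sketch below uses a small hypothetical frame with the same column names (Age, EstimatedSalary, Purchased) purely for illustration; it is not the author's code.

```python
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Hypothetical rows with the notebook's column names (values are made up)
data = pd.DataFrame({
    "Age":             [19, 35, 26, 27, 19, 27, 47, 45, 46, 48, 29, 31],
    "EstimatedSalary": [19000, 20000, 43000, 57000, 76000, 58000, 25000,
                        26000, 28000, 29000, 80000, 150000],
    "Purchased":       [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
})
X, y = data[["Age", "EstimatedSalary"]], data["Purchased"]

# Scaler and classifier travel together inside one estimator
model = make_pipeline(StandardScaler(), GaussianNB())
scores = cross_val_score(model, X, y, cv=3)
print(scores.round(3), scores.mean().round(3))
```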
false
1
1,101
0
1,300
1,101
129524947
# # **Study Case** # The case study that will be used is a related to Health using the Heart Disease Dataset. # The dataset can be accessed through the following link: # [Heart Disease Dataset](https://www.kaggle.com/datasets/johnsmith88/heart-disease-dataset/code?select=heart.csv) # # **Workflow** # **1) Business Understanding** # Business Understanding is the process of understanding the goals and business environment to be faced. In this case study, the business refers to the health sector, primarily related to heart disease. The Business Understanding phase is carried out to identify business problems, metrics, and objectives. # **2) Data Understanding** # Data Understanding is a stage to understand the data contained in the dataset. # **3) Data Preparation** # Data Preparation is a working stage that is conducted to prepare data before further processing. The activities in the Data Preparation process include: # * Checking for missing values # * Checking for duplicate data # * Checking for outliers # * Checking for data imbalance # **4) Descriptive Statistic Analysis** # Descriptive statistics or descriptive statistical analysis is conducted to find and obtain an overview of data characteristics through statistical measurements. # **5) Visualization** # Visualization is process to present data visually in the form of graphs or diagrams to make it easier to understand and analyze. Visualization can be done manually with EDA or automatically using Automated EDA. # **6) Correlation Analysis and Feature Selection** # Correlation analysis is performed to identify the relationship or correlation between variables or features in the dataset. # **7) Feature Engineering** # Feature engineering is the process of selecting and transforming features (or variables) in a dataset to improve the performance of a machine learning model. It involves identifying and extracting important features that are relevant and informative to the problem at hand, as well as creating new features from existing ones. The goal of feature engineering is to enhance the accuracy, efficiency, and interpretability of the model by providing it with more useful and relevant information to learn from. # **8) Determine The Hypothesis** # Formulating a hypothesis serves as a guide to test or evaluate an assumption or conjecture about the available data, which is the heart disease dataset in this case. # **9) Choosing Model** # Choosing a model to use for modeling the data. # **10) Cross Validation and Bootsrapping** # Cross validation and bootsrapping conducted to evaluate the model to be used. # **11) Building Model** # Build the model. # **12) Model Evaluation** # The purpose of model evaluation is to assess the performance of a machine learning model and determine how well it is able to generalize to new, unseen data. Model evaluation helps to identify potential issues with the model, such as overfitting or underfitting, and provides insights into how the model can be improved. By evaluating the model's performance on a separate test set, it is possible to estimate how well the model will perform on new data and make more informed decisions about its deployment. Additionally, model evaluation can help to compare different models and select the best one for the given task or problem. # # **1) Business Understanding** # a. 
Business Problem # Based on the Heart Disease Dataset that will be used, the dataset contains data related to patient demographics and clinical conditions that can be used to estimate the likelihood of the presence or absence of heart disease in patients, as well as which factors can influence the presence or absence of heart disease. # b. Goals # * Identifying the features that have the most influence on the presence or absence of heart disease in patients. This can be used to determine treatment and prevention strategies for patients. # * Developing a predictive model that can classify patients as having or not having heart disease based on their clinical conditions. # # **2) Data Understanding** # In this case study, the dataset used is the Heart Disease Dataset obtained through Kaggle. The dataset contains data related to the clinical and demographic conditions of patients who have the potential to develop heart disease. # There are 13 feature columns plus the target column in the dataset: # * **Age** # # Description: Patient's age in years # Data Type: Continuous # * **Sex** # # Description: Gender of the patient, with 1 = Male, 0 = Female # Data Type: Categorical # * **Chest Pain Type (cp)** # # Description: Type of the patient's chest pain, with values 0, 1, 2, 3 # Data Type: Categorical # * **Resting Blood Pressure (trestbps)** # # Description: The patient's blood pressure at rest in mmHg # Data Type: Continuous # * **Serum Cholesterol in mg/dl (chol)** # # Description: Total cholesterol in the patient's blood in mg/dl # Data Type: Continuous # * **Fasting Blood Sugar (fbs)** # # Description: The patient's blood sugar level after fasting for at least 8 hours, in mg/dl # Data Type: Categorical # * **Resting Electrocardiographic Results (restecg)** # # Description: The results of the patient's electrocardiogram taken at rest, with values 0, 1, 2 # Data Type: Categorical # * **Maximum Heart Rate Achieved (thalach)** # # Description: The highest value of the patient's heart rate during maximum physical activity # Data Type: Continuous # * **Exercise Induced Angina (exang)** # # Description: Chest pain that appears after physical activity, with 1 = chest pain present, 0 = no chest pain # Data Type: Categorical # * **ST Depression Induced by Exercise Relative to Rest (oldpeak)** # # Description: ST segment depression values on the electrocardiogram during exercise compared to rest # Data Type: Continuous # * **The Slope of The Peak Exercise ST Segment (slope)** # # Description: Slope of the ST segment at the peak of the exercise test # Data Type: Categorical # * **Number of Major Vessels Colored by Fluoroscopy (ca)** # # Description: The number of major blood vessels seen on angiography, with values 0, 1, 2, 3 # Data Type: Categorical # * **Thalassemia (thal)** # # Description: Thalassemia status of the patient, with 0 = normal, 1 = fixed defect, 2 = reversible defect # Data Type: Categorical # * **Target** # Description: Presence or absence of heart disease in the patient # Data Type: Categorical # # **3) Data Preparation** # ## Import Library and Dataset import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from google.colab import files uploaded = files.upload() import io df = pd.read_csv(io.BytesIO(uploaded["heart.csv"])) df.shape df.info() df # ## Checking for Missing Values df.isnull().sum() # ## Checking for Balanced Dataset df["target"].value_counts() 
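# A small helper (an addition, not from the original notebook) for judging class balance as percentages; it is shown on a toy Series, but df["target"] from the cell above could be passed in the same way.
import pandas as pd

def class_balance(labels: pd.Series) -> pd.Series:
    """Return the share of each class in percent, rounded to two decimals."""
    return (labels.value_counts(normalize=True) * 100).round(2)

toy_target = pd.Series([1, 0, 1, 1, 0, 1, 0, 1, 1, 0])
print(class_balance(toy_target))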
sns.countplot(data=df["target"]) # ## Handling Duplicate Data duplicate_rows = df[df.duplicated()] duplicate_sorted = duplicate_rows.sort_values(by=["age"]) print(duplicate_sorted) df = df.drop_duplicates() df.shape # ## Handling Outliers plt.figure(figsize=(17, 6)) sns.boxplot(data=df, orient="h") # ### Outliers trestbps plt.figure(figsize=(10, 3)) sns.boxplot(df["trestbps"]) q1 = df["trestbps"].quantile(0.25) q3 = df["trestbps"].quantile(0.75) IQR = q3 - q1 lower_limit = q1 - 1.5 * IQR upper_limit = q3 + 1.5 * IQR df = df[(df["trestbps"] > lower_limit) & (df["trestbps"] < upper_limit)] plt.figure(figsize=(10, 3)) sns.boxplot(df["trestbps"]) # ### Outliers chol plt.figure(figsize=(10, 3)) sns.boxplot(df["chol"]) q1 = df["chol"].quantile(0.25) q3 = df["chol"].quantile(0.75) IQR = q3 - q1 lower_limit = q1 - 1.5 * IQR upper_limit = q3 + 1.5 * IQR df = df[(df["chol"] > lower_limit) & (df["chol"] < upper_limit)] plt.figure(figsize=(10, 3)) sns.boxplot(df["chol"]) # ### Outliers thalach plt.figure(figsize=(10, 3)) sns.boxplot(df["thalach"]) q1 = df["thalach"].quantile(0.25) q3 = df["thalach"].quantile(0.75) IQR = q3 - q1 lower_limit = q1 - 1.5 * IQR upper_limit = q3 + 1.5 * IQR df = df[(df["thalach"] > lower_limit) & (df["thalach"] < upper_limit)] plt.figure(figsize=(10, 3)) sns.boxplot(df["thalach"]) # ### Outliers oldpeak plt.figure(figsize=(10, 3)) sns.boxplot(df["oldpeak"]) q1 = df["oldpeak"].quantile(0.25) q3 = df["oldpeak"].quantile(0.75) IQR = q3 - q1 lower_limit = q1 - 1.5 * IQR upper_limit = q3 + 1.5 * IQR df = df[(df["oldpeak"] > lower_limit) & (df["oldpeak"] < upper_limit)] plt.figure(figsize=(10, 3)) sns.boxplot(df["oldpeak"]) # ### Rechecking Outliers plt.figure(figsize=(15, 6)) sns.boxplot(data=df, orient="h") # # **4) Descriptive Statistic Analysis** df.describe() # # **5) Visualization** from dataprep.eda import create_report create_report(df).show() # # **6) Correlation Analysis and Feature Selection** # ## Performing Feature Selection # ### Checking Correlation corr = df.corr() corr print(corr["target"].sort_values(ascending=False)) X = df.drop("target", axis=1) X y = df["target"] y # ### Univariate Selection for Categorical Variable from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 bestfeatures = SelectKBest(score_func=chi2) fit = bestfeatures.fit(X, y) scores = pd.DataFrame(fit.scores_) dfcolumns = pd.DataFrame(X.columns) featureScores = pd.concat([dfcolumns, scores], axis=1) featureScores.columns = ["Label", "Score"] featureScores.sort_values(by="Score", ascending=False) # ### Dropping Features which are Not Correlated df.drop(["fbs", "restecg"], axis=1, inplace=True) df.head() df.shape # # **7) Feature Engineering** # ### Feature Engineering for i in df.columns: print(i, df[i].unique()) print("\n") new_sex = pd.get_dummies(data=df["sex"], prefix="sex") new_sex new_cp = pd.get_dummies(df["cp"], prefix="chestPain") new_cp new_exang = pd.get_dummies(df["exang"], prefix="exang") new_exang new_slope = pd.get_dummies(df["slope"], prefix="slope") new_slope new_thal = pd.get_dummies(df["thal"], prefix="thal") new_thal new_ca = pd.get_dummies(df["ca"], prefix="ca") new_ca app = [df, new_sex, new_cp, new_ca, new_thal, new_exang, new_slope] df1 = pd.concat(app, axis=1) df1.columns df1.drop(["sex", "cp", "thal", "exang", "ca", "slope"], axis=1, inplace=True) df1.head() df1.shape # ### Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() df1[["age", "trestbps", "chol", "oldpeak", "thalach"]] = 
sc.fit_transform( df1[["age", "trestbps", "chol", "oldpeak", "thalach"]] ) df1.head(10) df1.columns X = df1.drop("target", axis=1) X y = df1["target"] y # # **8) Determine The Hypothesis** # **Null Hypothesis** # H0: There is no relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target') # **Alternative Hypothesis** # H1: There is a relationship between the independent variables ('age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'sex', 'chestPain', 'ca', 'thal', 'exang', 'slope') and the dependent variable ('target') # # **9) Choosing Model** # ## 1. Random Forest # The Random Forest model will be used for modeling the Heart Disease dataset because this model is effective for dealing with multicollinearity among dataset variables. In addition, Random Forest is also suitable for disease prediction case studies and can provide feature importances for identifying the risk factors that most influence the risk of heart disease in patients. # ## 2. Logistic Regression # The Logistic Regression model is used because it can predict the probability of an event occurring based on given variables; in this case study, it predicts the probability of the presence or absence of heart disease based on the patient's health factors. By using the Logistic Regression model, it is expected that the relationship between these variables and the risk of the presence or absence of heart disease can be analyzed. # # **10) Cross Validation and Bootstrapping** # ## Splitting the Dataset into Training and Test Data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # ## Cross Validation from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score # ### Cross Validation for Model using Random Forest from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc_scores = cross_val_score(rfc, X_train, y_train, cv=5) print( "Accuracy with cross-validation: %.2f with standard deviation %.2f" % (rfc_scores.mean(), rfc_scores.std()) ) # The Cross Validation result for the model using Random Forest is 0.82 or 82%, which falls into the category of good predictive performance. Therefore, the process can proceed to create a model using Random Forest. # ### Cross Validation for Model using Logistic Regression from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr_scores = cross_val_score(lr, X_train, y_train, cv=5) print( "Accuracy with cross-validation: %.2f with standard deviation %.2f" % (lr_scores.mean(), lr_scores.std()) ) # The Cross Validation result for the model using Logistic Regression is 0.83 or 83%, which falls into the category of good predictive performance. Thus, the process can be continued to create a model using Logistic Regression. 
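# A compact sketch (an addition to the notebook) of how several candidate models could be compared with the same 5-fold cross-validation in one loop; X_train and y_train are assumed to be the arrays prepared above.
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

candidates = {
    "Random Forest": RandomForestClassifier(),
    "Logistic Regression": LogisticRegression(max_iter=1000),
}
for name, model in candidates.items():
    scores = cross_val_score(model, X_train, y_train, cv=5)
    print("%s: mean accuracy %.2f (std %.2f)" % (name, scores.mean(), scores.std()))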
# ## Bootstrapping from sklearn.utils import resample # ### Bootstrapping for Model using Random Forest rfc = RandomForestClassifier() n_boot_rfc = 100 accuracies = [] train_size = 0.8 for i in range(n_boot_rfc): X_boot_rfc, y_boot_rfc = resample( X_train, y_train, n_samples=int(train_size * len(X_train)) ) rfc.fit(X_boot_rfc, y_boot_rfc) rfc_accuracy = rfc.score(X_test, y_test) accuracies.append(rfc_accuracy) rfc_mean_acc = np.mean(accuracies) rfc_std_acc = np.std(accuracies) rfc_lower_ci = rfc_mean_acc - 1.96 * rfc_std_acc rfc_upper_ci = rfc_mean_acc + 1.96 * rfc_std_acc print("Mean accuracy: %.2f" % rfc_mean_acc) print("95%% confidence interval: [%.2f, %.2f]" % (rfc_lower_ci, rfc_upper_ci)) # The bootstrapping result for the model using Random Forest is 0.87 or 87%, indicating that the model has good performance for predicting new data. # ### Bootstrapping for Model using Logistic Regression lr = LogisticRegression() n_boot_lr = 100 accuracies = [] train_size = 0.8 for i in range(n_boot_lr): X_boot_lr, y_boot_lr = resample( X_train, y_train, n_samples=int(train_size * len(X_train)) ) lr.fit(X_boot_lr, y_boot_lr) lr_accuracy = lr.score(X_test, y_test) accuracies.append(lr_accuracy) lr_mean_acc = np.mean(accuracies) lr_std_acc = np.std(accuracies) lr_lower_ci = lr_mean_acc - 1.96 * lr_std_acc lr_upper_ci = lr_mean_acc + 1.96 * lr_std_acc print("Mean accuracy: %.2f" % lr_mean_acc) print("95%% confidence interval: [%.2f, %.2f]" % (lr_lower_ci, lr_upper_ci)) # The bootstrapping result for the model using Logistic Regression is 0.87 or 87%, indicating that the model has good performance for predicting new data. # # **11) Building Model** # ## Random Forest rfc = RandomForestClassifier() rfc.fit(X_train, y_train) y_pred_rfc = rfc.predict(X_test) pd.DataFrame(np.c_[y_test, y_pred_rfc], columns=["Actual", "Predicted"]) # ## Logistic Regression from sklearn.linear_model import LogisticRegression lr = LogisticRegression().fit(X_train, y_train) y_pred_lr = lr.predict(X_test) pd.DataFrame(np.c_[y_test, y_pred_lr], columns=["Actual", "Predicted"]) # # **12) Model Evaluation** # The evaluation methods used are: # ## 1. Confusion Matrix # The confusion matrix is suitable for the Heart Disease dataset because it provides information on true positives, true negatives, false positives, and false negatives that is directly relevant to this classification case. # ## 2. Precision Recall # Precision-recall is used because it provides information about the trade-off between precision and recall, which is important for evaluating classification models such as the one built on the Heart Disease dataset. 
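# An illustrative sketch (an addition) of the precision-recall trade-off mentioned above, drawn from the fitted Random Forest's predicted probabilities; rfc, X_test, and y_test are assumed to be the objects defined earlier in the notebook.
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve

rfc_probs = rfc.predict_proba(X_test)[:, 1]
prec, rec, _ = precision_recall_curve(y_test, rfc_probs)
plt.figure(figsize=(6, 4))
plt.plot(rec, prec)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision-Recall Curve (Random Forest)")
plt.show()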
from sklearn.metrics import ( confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, ) from sklearn.metrics import classification_report from sklearn import metrics # ## Model Evaluation for Model using Random Forest rfc = RandomForestClassifier() rfc.fit(X_train, y_train) rfc_train = rfc.score(X_train, y_train) * 100 rfc_test = rfc.score(X_test, y_test) * 100 print("Testing Accuracy:", round(rfc_test, 2), "%") print("Training Accuracy:", round(rfc_train, 2), "%") print(classification_report(y_test, y_pred_rfc)) data_rfc = confusion_matrix(y_test, y_pred_rfc) rfc_con = pd.DataFrame(data_rfc, columns=np.unique(y_test), index=np.unique(y_test)) plt.figure(figsize=(5, 3)) sns.heatmap(rfc_con, annot=True, fmt="g", cmap="YlGnBu") plt.xlabel("Predicted") plt.ylabel("Actual") plt.show() # Based on the results of the model evaluation above, it can be concluded that the Random Forest model has good performance in classifying the dataset, as can be seen from the high precision, recall, and f1-score values. # ## Model Evaluation for Model using Logistic Regression lr = LogisticRegression() lr.fit(X_train, y_train) lr_train = lr.score(X_train, y_train) * 100 lr_test = lr.score(X_test, y_test) * 100 print("Testing Accuracy:", round(lr_test, 2), "%") print("Training Accuracy:", round(lr_train, 2), "%") print(classification_report(y_test, y_pred_lr)) data_lr = confusion_matrix(y_test, y_pred_lr) lr_con = pd.DataFrame(data_lr, columns=np.unique(y_test), index=np.unique(y_test)) plt.figure(figsize=(5, 3)) sns.heatmap(lr_con, annot=True, fmt="g", cmap="YlGnBu") plt.xlabel("Predicted") plt.ylabel("Actual") plt.show() # Based on the results of the model evaluation above, it can be concluded that the Logistic Regression model has good performance in classifying the dataset, as can be seen from the high precision, recall, and f1-score values. # ## Feature Importance # ### Feature Importance for the Model Using Random Forest rfc = RandomForestClassifier(n_estimators=100, random_state=42) rfc.fit(X_train, y_train) rfc_fimp = pd.Series(rfc.feature_importances_, index=X.columns) rfc_fimp.sort_values(ascending=False) plt.figure(figsize=(10, 6)) rfc_fimp.nsmallest(30).plot(kind="barh") round(rfc_fimp, 4) * 100 plt.title("Important Features", size=12) plt.show() # Features with the most significant feature importance values in the model created using Random Forest: # * oldpeak # * thalach # * age # * ca_0 # * chol # Based on the feature importance results, it can be seen that the oldpeak variable has the greatest influence on the 'target' prediction results (presence or absence of heart disease) in the Random Forest model. The variables thalach, age, ca_0, and chol also have a significant effect on the 'target' prediction results. # Thus, if you want to make a model using the Random Forest method that is more effective for predicting the potential risk of heart disease, then these variables must be considered and selected as important features. 
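# A hedged cross-check (not in the original notebook): permutation importance on the test split often gives a less biased ranking than impurity-based importances, especially with one-hot encoded features; rfc, X_test, and y_test are assumed from the cells above.
import pandas as pd
from sklearn.inspection import permutation_importance

perm = permutation_importance(rfc, X_test, y_test, n_repeats=10, random_state=42)
perm_series = pd.Series(perm.importances_mean, index=X_test.columns)
print(perm_series.sort_values(ascending=False).head(10))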
# ### Feature Importance for the Model Using Logistic Regression lr = LogisticRegression() lr.fit(X_train, y_train) coef = lr.coef_[0] features = X.columns feature_importance = pd.DataFrame({"Features": features, "Coef": coef}) feature_importance["Exp_Coef"] = np.exp(feature_importance["Coef"]) feature_importance = feature_importance.sort_values(by="Exp_Coef", ascending=False) print(feature_importance) fig, ax = plt.subplots(figsize=(8, 6)) ax.barh(feature_importance["Features"], feature_importance["Exp_Coef"]) ax.set_xlabel("Feature Importance (Exp Coef)") ax.set_ylabel("Features") ax.invert_yaxis() plt.title("Important Features", size=12) plt.show() # Features with the most significant feature importance values in the model created using Logistic Regression: # * ca_0 # * chestPain_3 # * sex_0 # * chestPain_2 # * ca_4 # Based on the feature importance results, it can be seen that the ca_0 variable has the greatest influence on the 'target' prediction results in the Logistic Regression model. The variables chestPain_3, sex_0, chestPain_2, and ca_4 also have a significant effect on the 'target' prediction results. # Thus, if you want to make a model using the Logistic Regression method that is more effective for predicting the potential risk of heart disease, then these variables must be considered and selected as important features. # # **13) Model Comparison** # a. Confusion Matrix Results # The confusion matrix of the Random Forest model gives True Positives = 24, False Positives = 4, True Negatives = 27, and False Negatives = 1. Meanwhile, the Logistic Regression model gives True Positives = 24, False Positives = 6, True Negatives = 25, and False Negatives = 1. # b. Classification Report Results # The classification report for the Random Forest model gives an F1-Score of 91%, while the Logistic Regression model gives an F1-Score of 88%. # c. Performance # Random Forest is more accurate than Logistic Regression, as can be seen from its higher F1-Score. # d. Scalability # Logistic Regression can be used on very large datasets in relatively less time than Random Forest. # # **14) Hyperparameter Tuning** from sklearn.model_selection import GridSearchCV # ## Hyperparameter Tuning for Random Forest rfc = RandomForestClassifier() rfc_param = [ { "max_depth": np.arange(1, 10), "min_samples_split": [0.1, 0.5, 1.0], "random_state": np.arange(1, 50), "n_estimators": [25, 50, 100], } ] rfc_search = GridSearchCV(rfc, rfc_param, scoring="accuracy") rfc_result = rfc_search.fit(X_train, y_train) print("Best Score: %s" % rfc_result.best_score_) print("Best Hyperparameters: %s" % rfc_result.best_params_) # The hyperparameter tuning results for the Random Forest model are a best score of 0.86, with the best parameters being a maximum tree depth (max_depth) of 9, a minimum sample split (min_samples_split) of 0.1, 50 estimators (n_estimators), and a random state of 30. 
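# An optional alternative (an addition, not from the original notebook): a randomized search samples a fixed number of parameter combinations, which can be much cheaper than the exhaustive grid above while exploring a similar space; X_train and y_train are assumed from earlier cells.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV

param_dist = {
    "max_depth": np.arange(1, 10),
    "min_samples_split": [0.1, 0.5, 1.0],
    "n_estimators": [25, 50, 100],
}
rand_search = RandomizedSearchCV(
    RandomForestClassifier(random_state=30),
    param_distributions=param_dist,
    n_iter=20,
    scoring="accuracy",
    cv=5,
    random_state=42,
)
rand_result = rand_search.fit(X_train, y_train)
print("Best Score: %s" % rand_result.best_score_)
print("Best Hyperparameters: %s" % rand_result.best_params_)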
# ## Hyperparameter Tuning for Logistic Regression lr = LogisticRegression() lr_param = [{"penalty": ["l2"], "C": [0.1, 0.4, 0.5], "random_state": [0]}] lr_search = GridSearchCV( lr, lr_param, scoring="accuracy", n_jobs=-1, ) lr_result = lr_search.fit(X_train, y_train) print("Best Score: %s" % lr_result.best_score_) print("Best Hyperparameters: %s" % lr_result.best_params_) # The hyperparameter tuning results for the Logistic Regression model are a best score of 0.84, with the best parameters being an l2 regularization type (penalty), a regularization strength (C) of 0.1, and a random state of 0. # # **15) Predict How Well the Model Performs on the Test Dataset** # ### Random Forest hrfc = RandomForestClassifier( max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30 ) hrfc.fit(X_train, y_train) y_pred_hrfc = hrfc.predict(X_test) hrfc_accuracy = accuracy_score(y_test, y_pred_hrfc) print("Accuracy on test dataset: %.2f%%" % (hrfc_accuracy * 100)) # The prediction accuracy of the tuned Random Forest model is 89.29%. # ### Logistic Regression hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0) hlr.fit(X_train, y_train) y_pred_hlr = hlr.predict(X_test) hlr_accuracy = accuracy_score(y_test, y_pred_hlr) print("Accuracy on test dataset: %.2f%%" % (hlr_accuracy * 100)) # The prediction accuracy of the tuned Logistic Regression model is 87.5%. # # **16) Learning Curve** from sklearn.model_selection import learning_curve def plot_learning_curve( estimator, title, X, y, cv=None, n_jobs=None, train_sizes=np.linspace(0.1, 1.0, 5) ): plt.figure() plt.title(title) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores, fit_times, _ = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, return_times=True, ) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between( train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r", ) plt.fill_between( train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g", ) plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score") plt.plot( train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score" ) plt.legend(loc="best") return plt # ### Learning Curve for the Random Forest Model hrfc = RandomForestClassifier( max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30 ) title = "Random Forest Learning Curve" plot_learning_curve(hrfc, title, X_train, y_train, cv=5, n_jobs=-1) # Based on the Learning Curve for the Random Forest Model above, the following can be seen: # 1. Based on the Training Score curve, the model initially has perfect accuracy (1.0) on the training data when modeling small amounts of data; this accuracy then decreases as the amount of data to be modeled increases. This shows that the model probably overfits the training data while the amount of data is still small, but is able to balance accuracy and model complexity as the amount of data increases. # 2. 
Based on the Cross-Validation Score curve, it can be seen that the model initially has low accuracy (about 0.76) when modeling a small amount of data it has never seen before (out of sample). The accuracy then starts to increase as the amount of data increases. # 3. At the initial point, the two curves have a fairly large gap, but the gap gets smaller as the amount of data increases. This shows that the model, which initially has an overfitting problem, is able to balance its accuracy as the amount of data increases. # Thus, the model using Random Forest is good enough to handle data that is not too complex and not too simple, and is able to generalize well to data it has never seen before. # ### Learning Curve for the Logistic Regression Model hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0) title = "Logistic Regression Learning Curve" plot_learning_curve(hlr, title, X_train, y_train, cv=5, n_jobs=-1) # Based on the Learning Curve for the Logistic Regression Model above, the following can be seen: # 1. Based on the Training Score curve, the training score increases as the amount of training data increases, which means the model is able to slowly improve its accuracy on the training data as more data is provided. # 2. Based on the Cross-Validation Score curve, the model tends to be stable when modeling data it has never seen before and continues to improve as the amount of data increases. This shows that the model can improve its performance, but not optimally, as seen from the relatively small increase in the curve. # 3. At the second point, the Cross-Validation Score curve is higher than the Training Score curve. This indicates that under these conditions there is overfitting on the training data. # Thus, the model using Logistic Regression experiences a bit of overfitting at the second curve point. # # **17) ROC Analysis** from sklearn.metrics import roc_curve, roc_auc_score hrfc = RandomForestClassifier( max_depth=9, min_samples_split=0.1, n_estimators=50, random_state=30 ) hlr = LogisticRegression(C=0.1, penalty="l2", random_state=0) hrfc.fit(X_train, y_train) hlr.fit(X_train, y_train) hrfc_prob = hrfc.predict_proba(X_test)[:, 1] hlr_prob = hlr.predict_proba(X_test)[:, 1] fpr_hrfc, tpr_hrfc, _ = roc_curve(y_test, hrfc_prob) roc_auc_hrfc = roc_auc_score(y_test, hrfc_prob) fpr_hlr, tpr_hlr, _ = roc_curve(y_test, hlr_prob) roc_auc_hlr = roc_auc_score(y_test, hlr_prob) plt.figure(figsize=(8, 6)) plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc) plt.plot(fpr_hlr, tpr_hlr, label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr) plt.plot([0, 1], [0, 1], linestyle="--", color="gray") plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show() # The ROC Curve results for the two models that were built, Random Forest and Logistic Regression, show that the Logistic Regression model has an AUC of 0.96, which is larger than the AUC of the Random Forest model. Based on these AUC values, both models can be categorized as having a very good ability to distinguish between the positive and negative classes. The Logistic Regression curve also tends to lie closer to the top-left corner (coordinate 0.0, 1.0); the closer a model's curve is to the top-left corner, the better the model. 
Therefore, based on the ROC curves above, the Logistic Regression model is better than the Random Forest model. # ### ROC Curve for the Random Forest Model plt.figure(figsize=(8, 6)) plt.plot(fpr_hrfc, tpr_hrfc, label="Random Forest (AUC = %0.2f)" % roc_auc_hrfc) plt.plot([0, 1], [0, 1], linestyle="--", color="gray") plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show() # ### ROC Curve for the Logistic Regression Model plt.figure(figsize=(8, 6)) plt.plot( fpr_hlr, tpr_hlr, label="Logistic Regression (AUC = %0.2f)" % roc_auc_hlr, color="orange", ) plt.plot([0, 1], [0, 1], linestyle="--", color="gray") plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.legend() plt.show()
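# A short follow-up sketch (an addition): picking an operating threshold from the ROC curve with Youden's J statistic (TPR - FPR); y_test and hlr_prob are assumed from the ROC analysis above.
import numpy as np
from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(y_test, hlr_prob)
best_idx = np.argmax(tpr - fpr)
print(
    "Best threshold: %.3f (TPR = %.2f, FPR = %.2f)"
    % (thresholds[best_idx], tpr[best_idx], fpr[best_idx])
)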
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524947.ipynb
null
null
[{"Id": 129524947, "ScriptId": 38514066, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14357379, "CreationDate": "05/14/2023 14:27:57", "VersionNumber": 1.0, "Title": "Heart Disease Prediction Modeling", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 868.0, "LinesInsertedFromPrevious": 868.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
9,260
1
9,260
9,260
129524810
<jupyter_start><jupyter_text>Financial Inclusion in Africa You are asked to predict the likelihood of the person having a bank account or not (Yes = 1, No = 0), for each unique id in the test dataset . You will train your model on 70% of the data and test your model on the final 30% of the data, across four East African countries - Kenya, Rwanda, Tanzania, and Uganda. Kaggle dataset identifier: financial-inclusion-in-africa <jupyter_script>import pandas as pd train = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Train.csv") train.head() test = pd.read_csv("/kaggle/input/financial-inclusion-in-africa/Test.csv") test.head() train.nunique() train.info() train = train.drop(["uniqueid"], axis=1) train.head() test = test.drop(["uniqueid"], axis=1) test.head() train["country"].value_counts() train["bank_account"] = train["bank_account"].replace({"Yes": 1, "No": 0}) train.head() train["cellphone_access"] = train["cellphone_access"].replace({"Yes": 1, "No": 0}) train.head() test["cellphone_access"] = test["cellphone_access"].replace({"Yes": 1, "No": 0}) test.head() train["relationship_with_head"].value_counts() train["marital_status"].value_counts() train["education_level"].value_counts() train["job_type"].value_counts() round(train["bank_account"].value_counts() * 100 / len(train), 2) from lazypredict.Supervised import LazyClassifier y = train.pop("bank_account") X = train from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y ) clf = LazyClassifier(verbose=0, predictions=True) models, predictions = clf.fit(X_train, X_test, y_train, y_test) models predictions.head() from sklearn.metrics import classification_report for i in predictions.columns.tolist(): print("\t\t", i, "\n") print(classification_report(y_test, predictions[i]), "\n")
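# A minimal sketch, assuming the `models` DataFrame returned by LazyClassifier above exposes a "Balanced Accuracy" column (as recent lazypredict versions do): the results are indexed by model name, so the strongest baseline for this imbalanced bank_account target can be read off directly before refitting it as a standalone estimator.
best_name = models["Balanced Accuracy"].idxmax()  # model name with the highest balanced accuracy
print("Best baseline by balanced accuracy:", best_name)
print(models.loc[best_name])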
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524810.ipynb
financial-inclusion-in-africa
gauravduttakiit
[{"Id": 129524810, "ScriptId": 38513196, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4760409, "CreationDate": "05/14/2023 14:26:40", "VersionNumber": 1.0, "Title": "Bank Account Prediction : LazyPredict", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 185670275, "KernelVersionId": 129524810, "SourceDatasetVersionId": 5683202}]
[{"Id": 5683202, "DatasetId": 3267290, "DatasourceVersionId": 5758770, "CreatorUserId": 4760409, "LicenseName": "Unknown", "CreationDate": "05/14/2023 13:56:48", "VersionNumber": 1.0, "Title": "Financial Inclusion in Africa", "Slug": "financial-inclusion-in-africa", "Subtitle": NaN, "Description": "You are asked to predict the likelihood of the person having a bank account or not (Yes = 1, No = 0), for each unique id in the test dataset . You will train your model on 70% of the data and test your model on the final 30% of the data, across four East African countries - Kenya, Rwanda, Tanzania, and Uganda.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3267290, "CreatorUserId": 4760409, "OwnerUserId": 4760409.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5683202.0, "CurrentDatasourceVersionId": 5758770.0, "ForumId": 3332906, "Type": 2, "CreationDate": "05/14/2023 13:56:48", "LastActivityDate": "05/14/2023", "TotalViews": 108, "TotalDownloads": 6, "TotalVotes": 1, "TotalKernels": 2}]
[{"Id": 4760409, "UserName": "gauravduttakiit", "DisplayName": "Gaurav Dutta", "RegisterDate": "03/28/2020", "PerformanceTier": 3}]
false
2
494
5
613
494
129524735
<jupyter_start><jupyter_text>Most famous video card manufacturers' share prices # Share prices of the top 5 GPU companies: **NVIDIA** (1999-2023 share prices) ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fb1fd5dabd613a075ad897bf0369d56db%2Fshutterstock_nvidia-1.jpg.optimal.jpg?generation=1681388697258646&alt=media) **AMD** (1980-2023 share prices) ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2F94a06cf1df0c8d283251bc5c8f5a91f6%2F333752_O.png?generation=1681388888651843&alt=media) **Intel** (1980-2023 share prices) ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2F5126457e1e1ef08694646d848f69c1ef%2F-----intel----190182827.jpg?generation=1681388933148313&alt=media) **ASUS** (2000-2023 share prices) ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fdfac6f4c6c8c604444695e2346d3df69%2Fb43f54da4d61.jpg?generation=1681388967057017&alt=media) **MSI** (1962-2023 share prices) ![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fe0d10762358e5567bdbdbaeaaa24a89c%2Fmsi-top_.jpg?generation=1681388989358202&alt=media) Kaggle dataset identifier: nvidia-amd-intel-asus-msi-share-prices <jupyter_script># ### 模型 # - ARIMA(2,1,3) # - 加法模型因素分解 # - LSTM import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from statsmodels.tsa.stattools import adfuller from numpy import log from statsmodels.tsa.stattools import kpss from statsmodels.graphics.tsaplots import plot_acf from statsmodels.graphics.tsaplots import plot_pacf import matplotlib.pyplot as plt from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.stattools import pacf from keras.models import Sequential from keras.layers.core import Dense, Activation import keras import math from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error as mae from sklearn.metrics import mean_squared_error from numpy import array from keras.models import Sequential from keras.layers import LSTM, SimpleRNN from keras.layers import Dense from statsmodels.tsa.stattools import pacf from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense import warnings warnings.filterwarnings("ignore") # ## 数据 # - 挑一个品牌,NVIDIA # - 单变量,预测close # 数据描述:1999-01-25 至 2023-04-12 每天 NVIDIA 的股票价格以及市场份额数据,共6094条。选择close价格预测。 dataset = pd.read_csv( "/kaggle/input/nvidia-amd-intel-asus-msi-share-prices/NVIDIA (1999-2023).csv" ) dataset.head() # dataset.tail() dataset.describe() # ### 数据探索性分析 # 没有缺失值 dataset.isnull().sum() # #### 时序图 start_date = pd.to_datetime(dataset.Date[0]) end_date = pd.to_datetime(dataset.Date.values[-1]) dataset["Date"] = pd.to_datetime(dataset["Date"]) top_plt = plt.subplot2grid((5, 4), (0, 0), rowspan=3, colspan=4) top_plt.plot(dataset.Date, dataset["Close"]) plt.title("Historical Stock Close Price of Nvidia") # bottom_plt = plt.subplot2grid((5,4), (3,0), rowspan = 1, colspan = 4) # bottom_plt.bar(dataset.Date, dataset['Volume']) # plt.title('\nNvidia Trading Volume', y = -0.60) plt.gcf().set_size_inches(16, 10) # 后续神经网络结果好,可能是用了Volume dataset2 = dataset[["Close", "Volume", "Date"]] dataset2["logclose"] = np.log(dataset2["Close"]) dataset2.head() dataset2.isnull().sum() # ## ARIMA # 1. 差分去趋势,取log解决异方差 # 2. ACF,PACF定阶 # 3. 
利用auto ARIMA定阶 # ### Auto ARIMA # 先自动定阶,然后再结合ACF,PACF以及AIC解释 # ARIMA(2,1,3) import pmdarima as pm smodel = pm.auto_arima( dataset2.logclose, start_p=1, max_p=6, start_q=1, max_q=6, seasonal=True, # seasonal=True的话,是SARIMA stationary=False, test="adf", # 自动差分 information_criterion="aic", stepwise=False, error_action="ignore", suppress_warnings=True, ) smodel.summary() smodel.plot_diagnostics() plt.gcf().set_size_inches(16, 10) plt.show() # #### 差分过程 # 取log,一阶差分 # 需要检验平稳、非纯随机 np.min(dataset2.logclose) plt.rcParams.update({"figure.figsize": (18, 7), "figure.dpi": 120}) # Original Series fig, axes = plt.subplots(3, 2, sharex=True) axes[0, 0].plot(dataset2["Close"].values) axes[0, 0].set_title("Original Series") # plot_acf(dataset2['Close'].values, ax = axes[0, 1]);axes[0, 1].set_title('ACF of Original Series') axes[0, 1].plot(dataset2["logclose"].values) axes[0, 1].set_title("Original Series(log)") # plot_acf(dataset2['logclose'].values, ax = axes[0, 3]);axes[0, 3].set_title('ACF of Original Series(log)') # 1st Differencing axes[1, 0].plot(dataset2["Close"].diff()) axes[1, 0].set_title("1st Order Differencing") # plot_acf(dataset2['Close'].diff().dropna(), ax = axes[1, 1]);axes[1, 1].set_title('ACF of 1st Order Differencing') axes[1, 1].plot(dataset2["logclose"].diff()) axes[1, 1].set_title("1st Order Differencing(log)") # plot_acf(dataset2['logclose'].diff(), ax = axes[1, 3]);axes[1, 3].set_title('ACF of 1st Order Differencing(log)') # 2nd Differencing axes[2, 0].plot(dataset2["Close"].diff().diff(periods=8)) axes[2, 0].set_title("2nd Order Differencing") # plot_acf(dataset2['Close'].diff().diff().dropna(), ax = axes[2, 1]);axes[2, 1].set_title('ACF of 2nd Order Differencing') axes[2, 1].plot(dataset2["logclose"].diff().diff(periods=8)) axes[2, 1].set_title("2nd Order Differencing(log)") # plot_acf(dataset2['logclose'].diff().diff(), ax = axes[2, 3]);axes[2, 3].set_title('ACF of 2nd Order Differencing(log)') plt.show() # #### 平稳性检验 # - ADF 拒绝,平稳 # - BP 拒绝,还存在异方差。把异常值处理一下可能能过? 
result = adfuller(dataset2.logclose.diff()[1:], autolag="AIC") print(f"ADF Statistic: {result[0]}") print(f"p-value: {result[1]}") for key, value in result[4].items(): print("Critial Values:") print(f" {key}, {value}") import statsmodels.stats.api as sms test = sms.het_breuschpagan( np.array(dataset2.logclose.diff()[1:]).reshape(-1, 1), np.array(range(len(dataset2.logclose.diff()[1:]))).reshape(-1, 1), ) test[-1] # #### 纯随机检验 # 有阶数拒绝就可以 from statsmodels.stats.diagnostic import acorr_ljungbox ljungbox_result = acorr_ljungbox( dataset2.logclose.diff().diff(periods=8)[9:], lags=12 ) # 返回统计量和p值,lags为检验的延迟数 ljungbox_result # #### ACF,PACF plt.rcParams.update({"figure.figsize": (20, 5), "figure.dpi": 120}) fig, axes = plt.subplots(1, 2, sharex=True) plot_acf(dataset2.logclose.diff()[1:], ax=axes[0]) axes[0].set_title("ACF of Original Series(log)") plot_pacf(dataset2.logclose.diff()[1:], ax=axes[1]) axes[1].set_title("PACF of 1st Order Differencing(log)") plt.show() # ## Models considered : # **ARIMA(2,1,3) ** # # **加法模型 ** # # **LSTM ** # ## ARIMA data = dataset2["logclose"].values print("Length of Total data: ", len(data)) train_length = int(len(data) * 0.9) train_data = data[:train_length] test_data = data[train_length:] print("Train and Test data length: ", len(train_data), len(test_data)) # ### Building ARIMA Model import statsmodels.api as sm model = sm.tsa.arima.ARIMA(train_data, order=(2, 1, 3)) model_fit = model.fit() print(model_fit.summary()) # Plot residual errors residuals = pd.DataFrame(model_fit.resid) fig, ax = plt.subplots(1, 2) residuals.plot(title="Residuals", ax=ax[0]) residuals.plot(kind="kde", title="Density", ax=ax[1]) plt.show() # Forecast forecast_result = model_fit.forecast(steps=50, alpha=0.05) # 95% conf forecast_result[:50] test_data[:50] # ### Plotting Test and Predicted Results # 预测未来30天的销量 pred_uc = model_fit.get_forecast(steps=30, alpha=0.05) pred_pr = pred_uc.predicted_mean # 获取预测的置信区间 pred_ci = pred_uc.conf_int() # 合并预测值与置信区间 pred_data = pd.DataFrame( {"forecast": pred_pr, "lower_ci_95": pred_ci[:, 0], "upper_ci_95": pred_ci[:, 1]} ) pred_data.head() fig, ax = plt.subplots(figsize=(15, 6)) ax.plot(pred_data.index[:30] - 30, train_data[-30:]) ax.plot(pred_data.forecast[:30], color="green", label="forecast", alpha=0.7) ax.plot( pred_data.index[:30], test_data[:30], color="yellow", label="observed", alpha=0.7 ) ax.fill_between( pred_data.index, pred_data.lower_ci_95, pred_data.upper_ci_95, color="grey", alpha=0.5, label="95% confidence interval", ) ax.set_title("ARIMA Model for Nvidia Price Forecasting") ax.legend() plt.show() # Here we're plotting Test and Predicted data plt.figure(figsize=(12, 6)) plt.rcParams.update({"font.size": 12}) plt.plot(test_data[:50], "#0077be", label="Actual") plt.plot(forecast_result[:], "#ff8841", label="Predicted") plt.title("ARIMA Model for Nvidia Price Forecasting") plt.ylabel("Nvidia Price [in Dollar]") plt.xlabel("Time Steps [in Days] ") plt.legend() plt.show() # ## 加法模型 # 三个,周期为7,30,365。三个周期下都没有明显的季节趋势 from statsmodels.tsa.seasonal import seasonal_decompose decomposition = seasonal_decompose( dataset2.logclose, # model='multiplicative', period=7, ) fig = plt.figure() fig = decomposition.plot() fig.set_size_inches(15, 12) plt.show() from statsmodels.tsa.seasonal import seasonal_decompose decomposition = seasonal_decompose( dataset2.logclose, # model='multiplicative', period=30, ) fig = plt.figure() fig = decomposition.plot() fig.set_size_inches(15, 12) plt.show() from statsmodels.tsa.seasonal import seasonal_decompose 
decomposition = seasonal_decompose( dataset2.logclose, # model='multiplicative', period=365, ) fig = plt.figure() fig = decomposition.plot() fig.set_size_inches(15, 12) plt.show() help(seasonal_decompose) # ## Artificial Neural Network data = dataset2["Close"].values print("Shape of data: ", data.shape) # Separating train and test data train_length = int(len(data) * 0.8) print("Train length: ", train_length) train_data, test_data = data[:train_length], data[train_length:] print("Shape of Train and Test data: ", train_data.shape, test_data.shape) # Changing dataset shape to 2D train_data = train_data.reshape(-1, 1) test_data = test_data.reshape(-1, 1) print("Shape of Train and Test data: ", train_data.shape, test_data.shape) def create_dataset(dataset, lookback): dataX, dataY = [], [] for i in range(len(dataset) - lookback - 1): a = dataset[i : (i + lookback), 0] dataX.append(a) b = dataset[i + lookback, 0] dataY.append(b) return np.array(dataX), np.array(dataY) # ### Selecting Lag value from PACF graph plot_pacf(data, lags=10) plt.show() # ### Considering only Auto-correlation Lag value Greater than 10% pacf_value = pacf(data, nlags=20) lag = 0 # collect lag values greater than 10% correlation for x in pacf_value: if x > 0.1: lag += 1 else: break print("Selected look_back (or lag = ): ", lag) # ### Separating Input and Output values train_X, train_y = create_dataset(train_data, lag) test_X, test_y = create_dataset(test_data, lag) print("Shape of train_X and train_y: ", train_X.shape, train_y.shape) print("Shape of test_X and test_y: ", test_X.shape, test_y.shape) # ### Building an MLP model np.random.seed(7) # model = Sequential() # model.add(Dense(64, input_dim = lag, activation='relu', name= "1st_hidden")) # # model.add(Dense(64, activation='relu', name = '2nd_hidden')) # model.add(Dense(1, name = 'Output_layer', activation = 'linear')) # # model.add(Activation("linear", name = 'Linear_activation')) # model.compile(loss = "mean_squared_error", optimizer = "adam") # model.summary() # ### Fitting data to Model # epoch_number = 100 # batches = 64 # history = model.fit(train_X, train_y, epochs = epoch_number, batch_size = batches, verbose = 1, shuffle = False, # validation_split = 0.1) # ### Train and Validation Loss # # plot history # plt.clf # plt.figure(figsize = (10,8)) # plt.plot(history.history['loss'], label = 'train') # plt.plot(history.history['val_loss'], label = 'test') # plt.xlabel('Number of Epochs') # plt.ylabel('Train and Test Loss') # plt.title('Train and Test loss per epochs [Univariate]') # plt.legend() # plt.show() # ### Making Predictions # # Make prediction # testPredict = model.predict(test_X) # predicted_value = testPredict[:, 0] # ### Evaluation Metrics for measuring performance # * ** R-Squared ** # * ** Mean Absolute Error ** # * ** Mean Absolute Percentage Error** # * ** Mean Squared Error** # * ** Root Mean Squared Error** # * ** Normalized Root Mean Squared Error** # * ** Weighted Absolute Percentage Error** # * ** Weighted Mean Absolute Percentage Error** def evaluate_forecast_results(actual, predicted): print("R2 Score: ", round(r2_score(actual, predicted), 2)) print("MAE : ", round(mae(actual, predicted), 2)) print("MSE: ", round(mean_squared_error(actual, predicted), 2)) print("RMSE: ", round(math.sqrt(mean_squared_error(actual, predicted)), 2)) print("NRMSE: ", NRMSE(actual, predicted)) print("WMAPE: ", WMAPE(actual, predicted)) def NRMSE(actual, predicted): rmse = math.sqrt(mean_squared_error(actual, predicted)) nrmse = rmse / np.mean(actual) return 
round(nrmse, 4) def WMAPE(actual, predicted): abs_error = np.sum(np.abs(actual - predicted)) wmape = abs_error / np.sum(actual) return round(wmape, 4) # evaluate_forecast_results(test_y, predicted_value)  # needs predicted_value from the commented-out MLP prediction above # plt.figure(figsize = (16, 8)) # plt.rcParams.update({'font.size': 12}) # plt.plot(test_y[:], '#0077be', label = 'Actual') # plt.plot(predicted_value, '#ff8841', label = 'Predicted') # plt.title('MLP Model for Nvidia Price Forecasting') # plt.ylabel('Nvidia Stock Close Price ') # plt.xlabel('Time Steps [in Days] ') # plt.legend() # plt.show() # ## RNN - Univariate Time Series Forecasting # # data = dataset2['Close'].values # print('Shape of data: ', data.shape) # Separate train and test data train_length = int(len(data) * 0.8) print("Train length: ", train_length) train_data, test_data = data[:train_length], data[train_length:] print("Shape of Train and Test data: ", len(train_data), len(test_data)) def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): end_ix = i + n_steps if end_ix > len(sequence) - 1: break seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # ### Lag Value already to be chosen from PACF Plot pacf_value = pacf(data, nlags=20) lag = 0 # collect lag values greater than 10% correlation for x in pacf_value: if x > 0.1: lag += 1 else: break print("Selected look_back (or lag = ): ", lag) n_features = 1 train_X, train_y = split_sequence(train_data, lag) test_X, test_y = split_sequence(test_data, lag) print("Shape of train_X and train_y: ", train_X.shape, train_y.shape) print("Shape of test_X and test_y: ", test_X.shape, test_y.shape) # ### Reshaping train_X and test_X to 3-D train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features)) test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features)) # New shape of train_X and test_X are :- print("Shape of train_X and train_y: ", train_X.shape, train_y.shape) print("Shape of test_X and test_y: ", test_X.shape, test_y.shape) # ### Building the model # # define model # model = Sequential() # model.add(SimpleRNN(64, activation='relu', return_sequences = False, input_shape = (lag, n_features))) # model.add(Dense(1)) # model.compile(optimizer = 'adam', loss = 'mse') # model.summary() # ### Fit the model - with training data import tensorflow as tf tf.config.run_functions_eagerly(True) # fit model # cb = tf.keras.callbacks.EarlyStopping(monitor = 'loss', patience = 15, restore_best_weights = True) # history = model.fit(train_X, train_y, epochs = 150, batch_size = 64, verbose = 1, validation_split = 0.1, # callbacks = [cb]) # ### Summarizing model accuracy and Loss # # summarize history for loss # plt.plot(history.history['loss']) # plt.plot(history.history['val_loss']) # plt.title('model loss') # plt.ylabel('loss') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc = 'upper left') # plt.show() # ### Making prediction with Test data # train_predict = model.predict(train_X) # test_predict = model.predict(test_X) # print('Shape of train and test predict: ', train_predict.shape, test_predict.shape) # ### Model evaluation # actual_ = test_y # predicted_ = test_predict[:, 0] # len(actual_), len(predicted_) # evaluate_forecast_results(actual_, predicted_) # ### Plotting test and predicted data # # plt.rc("figure", figsize = (14,8)) # plt.rcParams.update({'font.size': 16}) # plt.plot(actual_, label = 'Actual') # plt.plot(predicted_, label = 'Predicted') # plt.xlabel('Time in days') # plt.ylabel('Nvidia stock price') # 
plt.title('Nvidia Stock Close price prediction using Simple RNN - Test data') # plt.legend() # plt.show() # df_train = pd.DataFrame(columns = ['Train data']) # df_train['Train data'] = train_data # df = pd.DataFrame(columns = ['Test data', 'Predicted data']) # df['Test data'] = actual_ # df['Predicted data'] = predicted_ # total_len = len(df_train['Train data']) + len(df['Test data']) # range(len(df_train['Train data']), total_len) # x_list = [x for x in range(len(df_train['Train data']), total_len)] # df.index = x_list # plt.rc("figure", figsize=(14,8)) # plt.rcParams.update({'font.size': 16}) # plt.xlabel('Time in days') # plt.ylabel('Nvidia price') # plt.title('Nvidia price prediction using Simple RNN') # plt.plot(df_train['Train data']) # plt.plot(df[['Test data', 'Predicted data']]) # plt.legend(['Train', 'Test', 'Predictions'], loc='lower right') # plt.show() # ## LSTM data = dataset2["Close"].values print("Shape of data: ", data.shape) # Separate train and test data train_length = int(len(data) * 0.8) print("Train length: ", train_length) train_data, test_data = data[:train_length], data[train_length:] print("Shape of Train and Test data: ", len(train_data), len(test_data)) def split_sequence(sequence, n_steps): X, y = list(), list() for i in range(len(sequence)): end_ix = i + n_steps if end_ix > len(sequence) - 1: break seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] X.append(seq_x) y.append(seq_y) return array(X), array(y) # ### Choosing the appropriate lag value lag = 2 n_features = 1 train_X, train_y = split_sequence(train_data, lag) test_X, test_y = split_sequence(test_data, lag) print("Shape of train_X and train_y: ", train_X.shape, train_y.shape) print("Shape of test_X and test_y: ", test_X.shape, test_y.shape) # ### Reshaping train_X and test_X to 3D train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], n_features)) test_X = test_X.reshape((test_X.shape[0], test_X.shape[1], n_features)) print("Shape of train_X and train_y: ", train_X.shape, train_y.shape) print("Shape of test_X and test_y: ", test_X.shape, test_y.shape) # ### Building LSTM Model model = Sequential() model.add( LSTM(64, activation="relu", return_sequences=True, input_shape=(lag, n_features)) ) model.add(LSTM(64, activation="relu")) model.add(Dense(1)) model.compile(optimizer="adam", loss="mse") model.summary() # ### Fitting model with data import tensorflow as tf tf.config.run_functions_eagerly(True) cb = tf.keras.callbacks.EarlyStopping( monitor="loss", patience=15, restore_best_weights=True ) history = model.fit( train_X, train_y, epochs=150, batch_size=64, verbose=1, validation_split=0.1, callbacks=[cb], ) # ### Summarizing model accuracy and Loss plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() # ### Making the prediction train_predict = model.predict(train_X) test_predict = model.predict(test_X) print("Shape of train and test predict: ", train_predict.shape, test_predict.shape) # ### Model Evaluation actual_lstm = test_y predicted_lstm = test_predict[:, 0] evaluate_forecast_results(actual_lstm, predicted_lstm) df_train = pd.DataFrame(columns=["Train data"]) df_train["Train data"] = train_data df = pd.DataFrame(columns=["Test data", "Predicted data"]) df["Test data"] = actual_lstm df["Predicted data"] = predicted_lstm total_len = len(df_train["Train data"]) + len(df["Test data"]) range(len(df_train["Train data"]), total_len) x_list = [x for x in 
range(len(df_train["Train data"]), total_len)] df.index = x_list plt.rc("figure", figsize=(14, 8)) plt.rcParams.update({"font.size": 16}) plt.xlabel("Time in days") plt.ylabel("Nvidia Stock Close price") plt.title("Nvidia Stock price prediction using LSTM") plt.plot(df_train["Train data"]) plt.plot(df[["Test data", "Predicted data"]]) plt.legend(["Train", "Test", "Predictions"], loc="lower right") plt.show()
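# A minimal sketch, assuming pred_data from the ARIMA section above: that model was fit on log prices, so its forecasts and interval bounds are on the log scale and can be mapped back to dollars with np.exp before plotting against the raw Close series.
price_forecast = np.exp(pred_data["forecast"])  # exp of the log-scale mean gives the median of the implied lognormal forecast
price_lower = np.exp(pred_data["lower_ci_95"])  # interval bounds transform monotonically, so exp of each bound is valid
price_upper = np.exp(pred_data["upper_ci_95"])
print(price_forecast.head())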
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524735.ipynb
nvidia-amd-intel-asus-msi-share-prices
kapturovalexander
[{"Id": 129524735, "ScriptId": 37699502, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10493971, "CreationDate": "05/14/2023 14:26:12", "VersionNumber": 1.0, "Title": "GPU Stock Price Prediction \u81ea\u7528", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 662.0, "LinesInsertedFromPrevious": 285.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 377.0, "LinesInsertedFromFork": 285.0, "LinesDeletedFromFork": 229.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 377.0, "TotalVotes": 1}]
[{"Id": 185670143, "KernelVersionId": 129524735, "SourceDatasetVersionId": 5634621}]
[{"Id": 5634621, "DatasetId": 3126518, "DatasourceVersionId": 5709896, "CreatorUserId": 10074224, "LicenseName": "Other (specified in description)", "CreationDate": "05/08/2023 15:34:07", "VersionNumber": 7.0, "Title": "Most famous video card manufacturers' share prices", "Slug": "nvidia-amd-intel-asus-msi-share-prices", "Subtitle": "Share prices of 5 biggest companies who make GPU", "Description": "# Share prices of the top 5 GPU companies:\n**NVIDIA** (1999-2023 share prices)\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fb1fd5dabd613a075ad897bf0369d56db%2Fshutterstock_nvidia-1.jpg.optimal.jpg?generation=1681388697258646&alt=media)\n**AMD** (1980-2023 share prices)\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2F94a06cf1df0c8d283251bc5c8f5a91f6%2F333752_O.png?generation=1681388888651843&alt=media)\n**Intel** (1980-2023 share prices)\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2F5126457e1e1ef08694646d848f69c1ef%2F-----intel----190182827.jpg?generation=1681388933148313&alt=media)\n**ASUS** (2000-2023 share prices)\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fdfac6f4c6c8c604444695e2346d3df69%2Fb43f54da4d61.jpg?generation=1681388967057017&alt=media)\n**MSI** (1962-2023 share prices)\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F10074224%2Fe0d10762358e5567bdbdbaeaaa24a89c%2Fmsi-top_.jpg?generation=1681388989358202&alt=media)", "VersionNotes": "Data Update 2023-05-08", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3126518, "CreatorUserId": 10074224, "OwnerUserId": 10074224.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6218583.0, "CurrentDatasourceVersionId": 6297954.0, "ForumId": 3190101, "Type": 2, "CreationDate": "04/13/2023 12:15:18", "LastActivityDate": "04/13/2023", "TotalViews": 9305, "TotalDownloads": 1486, "TotalVotes": 69, "TotalKernels": 7}]
[{"Id": 10074224, "UserName": "kapturovalexander", "DisplayName": "Alexander Kapturov", "RegisterDate": "03/28/2022", "PerformanceTier": 2}]
false
1
6,952
1
7,632
6,952
129524299
import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import warnings warnings.filterwarnings("ignore") from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import balanced_accuracy_score, log_loss from sklearn.impute import SimpleImputer from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import balanced_accuracy_score from sklearn.model_selection import cross_val_score, train_test_split test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") train.head(5) train.columns expected_results = train[["Id", "Class"]].copy() expected_results.head(5) plt.figure(figsize=(3, 3)) plot_train = train.Class.value_counts() plot_train.plot(kind="bar", color=["red", "orange"]) Health_indicators = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "EJ", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", "GI", "GL", ] sns.set(font_scale=2) fig, axes = plt.subplots(10, 6, figsize=(40, 60)) for idx, type_col in enumerate(Health_indicators): sns.barplot( x="Class", y=type_col, data=train, ax=axes[idx // 6, idx % 6], hue="Class", palette={0: "red", 1: "orange"}, ) plt.tight_layout() train_df = train.drop(["EJ", "Id"], axis=1) test_df = test.drop(["EJ"], axis=1) train_df.isnull().sum() train_df.head() sns.set(font_scale=0.5) log_data = np.log(train_df) # Plot histogram of each column log_data.hist() def balanced_log_loss(y_true, y_pred): """ Computes the balanced logarithmic loss between y_true and y_pred. Parameters: - y_true: array-like, shape (n_samples,) True binary labels. - y_pred: array-like, shape (n_samples,) Predicted probabilities for class 1. Returns: - loss: float The balanced logarithmic loss. 
""" # Compute the class weights class_weight = len(y_true) / (2 * np.bincount(y_true)) # Compute the loss for each observation loss = np.zeros_like(y_true, dtype=float) loss[y_true == 0] = -np.log(1 - y_pred[y_true == 0]) loss[y_true == 1] = -np.log(y_pred[y_true == 1]) # Weight the loss for each observation loss_weighted = np.zeros_like(y_true, dtype=float) loss_weighted[y_true == 0] = loss[y_true == 0] * class_weight[0] loss_weighted[y_true == 1] = loss[y_true == 1] * class_weight[1] # Compute the average loss return np.mean(loss_weighted) # ### Logistic Regression # Split the data into training and validation sets X = train_df.drop(["Class"], axis=1) y = train_df["Class"] X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) # Impute missing values imputer = SimpleImputer(strategy="mean") imputer.fit(X_train) X_train_imputed = imputer.transform(X_train) X_val_imputed = imputer.transform(X_val) # Train the logistic regression model on the imputed data model = LogisticRegression() model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### Decision Tree from sklearn.tree import DecisionTreeClassifier # Train the decision tree model on the imputed data model = DecisionTreeClassifier(random_state=42) model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### Random Forest from sklearn.ensemble import RandomForestClassifier # Train a random forest classifier on the imputed data model = RandomForestClassifier(n_estimators=100, random_state=42) model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### SVC Model from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.model_selection import cross_val_score from sklearn.metrics import balanced_accuracy_score, make_scorer from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline # Impute missing values imputer = SimpleImputer(strategy="mean") imputer.fit(X_train) X_train_imputed = imputer.transform(X_train) X_val_imputed = imputer.transform(X_val) # Scale the data scaler = StandardScaler() scaler.fit(X_train_imputed) X_train_scaled = scaler.transform(X_train_imputed) X_val_scaled = scaler.transform(X_val_imputed) # Train the SVM model on the scaled data model = make_pipeline(SVC(probability=True, random_state=42)) model.fit(X_train_scaled, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_scaled)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### XGBClassifier Model from xgboost import XGBClassifier model = XGBClassifier() model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # 
Balanced Logarithmic loss on validation set : # * LR Model = 0.836177246086312 # * DT Model = inf # * RFC Model = 0.4081586115192574 # * SVM Model = 0.5020848394635566 # * XGB Model = 0.2550011273428488 # Make predictions on the test set using the last trained model (the XGBClassifier) test_imputed = imputer.transform(test_df.drop("Id", axis=1)) test_preds = model.predict_proba(test_imputed)[:, 1] # Create the submission dataframe submission_df = pd.DataFrame( {"Id": test_df["Id"], "0": 1 - test_preds, "1": test_preds} ) # Save the submission dataframe to a CSV file submission_df.to_csv("submission.csv", index=False) submission_df.head()
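# A minimal sketch, assuming the imputed splits and the balanced_log_loss function defined above: the per-model numbers listed in the summary come from repeating the same fit / predict_proba / score pattern, which can be written once as a loop for easier comparison.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
candidates = {
    "LR": LogisticRegression(max_iter=1000),
    "DT": DecisionTreeClassifier(random_state=42),
    "RFC": RandomForestClassifier(n_estimators=100, random_state=42),
    "XGB": XGBClassifier(),
}
for name, clf in candidates.items():
    clf.fit(X_train_imputed, y_train)
    proba = clf.predict_proba(X_val_imputed)[:, 1]
    print(name, "balanced log loss:", balanced_log_loss(y_val, proba))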
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524299.ipynb
null
null
[{"Id": 129524299, "ScriptId": 38497159, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13119587, "CreationDate": "05/14/2023 14:22:41", "VersionNumber": 2.0, "Title": "notebook63eeba59cd", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 200.0, "LinesInsertedFromPrevious": 170.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 30.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import warnings warnings.filterwarnings("ignore") from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import balanced_accuracy_score, log_loss from sklearn.impute import SimpleImputer from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import balanced_accuracy_score, balanced_log_loss from sklearn.model_selection import cross_val_score, train_test_split test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") train.head(5) train.columns expected_results = train[["Id", "Class"]].copy() expected_results.head(5) plt.figure(figsize=(3, 3)) plot_train = train.Class.value_counts() plot_train.plot(kind="bar", color=["red", "orange"]) Health_indicators = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "EJ", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", "GI", "GL", ] sns.set(font_scale=2) fig, axes = plt.subplots(10, 6, figsize=(40, 60)) for idx, type_col in enumerate(Health_indicators): sns.barplot( x="Class", y=type_col, data=train, ax=axes[idx // 6, idx % 6], hue="Class", palette={0: "red", 1: "orange"}, ) plt.tight_layout() train_df.isnull().sum() train_df = train.drop(["EJ", "Id"], axis=1) test_df = test.drop(["EJ"], axis=1) train_df.head() sns.set(font_scale=0.5) log_data = np.log(train_df) # Plot histogram of each column log_data.hist() def balanced_log_loss(y_true, y_pred): """ Computes the balanced logarithmic loss between y_true and y_pred. Parameters: - y_true: array-like, shape (n_samples,) True binary labels. - y_pred: array-like, shape (n_samples,) Predicted probabilities for class 1. Returns: - loss: float The balanced logarithmic loss. 
""" # Compute the class weights class_weight = len(y_true) / (2 * np.bincount(y_true)) # Compute the loss for each observation loss = np.zeros_like(y_true, dtype=float) loss[y_true == 0] = -np.log(1 - y_pred[y_true == 0]) loss[y_true == 1] = -np.log(y_pred[y_true == 1]) # Weight the loss for each observation loss_weighted = np.zeros_like(y_true, dtype=float) loss_weighted[y_true == 0] = loss[y_true == 0] * class_weight[0] loss_weighted[y_true == 1] = loss[y_true == 1] * class_weight[1] # Compute the average loss return np.mean(loss_weighted) # ### Logistic Regression # Split the data into training and validation sets X = train_df.drop(["Class"], axis=1) y = train_df["Class"] X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) # Impute missing values imputer = SimpleImputer(strategy="mean") imputer.fit(X_train) X_train_imputed = imputer.transform(X_train) X_val_imputed = imputer.transform(X_val) # Train the logistic regression model on the imputed data model = LogisticRegression() model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### Decision Tree from sklearn.tree import DecisionTreeClassifier # Train the decision tree model on the imputed data model = DecisionTreeClassifier(random_state=42) model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### Random Forest # Train a random forest classifier on the imputed data model = RandomForestClassifier(n_estimators=100, random_state=42) model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### SVC Model from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler from sklearn.model_selection import cross_val_score from sklearn.metrics import balanced_accuracy_score, make_scorer from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline # Impute missing values imputer = SimpleImputer(strategy="mean") imputer.fit(X_train) X_train_imputed = imputer.transform(X_train) X_val_imputed = imputer.transform(X_val) # Scale the data scaler = StandardScaler() scaler.fit(X_train_imputed) X_train_scaled = scaler.transform(X_train_imputed) X_val_scaled = scaler.transform(X_val_imputed) # Train the SVM model on the scaled data model = make_pipeline(SVC(probability=True, random_state=42)) model.fit(X_train_scaled, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_scaled)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # ### XGBClassifier Model from xgboost import XGBClassifier model = XGBClassifier() model.fit(X_train_imputed, y_train) # Predict on the validation set and calculate the balanced logarithmic loss y_val_pred = model.predict_proba(X_val_imputed)[:, 1] log_loss = balanced_log_loss(y_val, y_val_pred) print("Balanced logarithmic loss on validation set:", log_loss) # 
Balanced Logarithmic loss on validation set: # * LR Model = 0.836177246086312 # * DT Model = inf # * RFC Model = 0.4081586115192574 # * SVM Model = 0.5020848394635566 # * XGB Model = 0.2550011273428488 # Make predictions on the test set using the most recently fitted model (the XGBClassifier above) test_imputed = imputer.transform(test_df.drop("Id", axis=1)) test_preds = model.predict_proba(test_imputed)[:, 1] # Create the submission dataframe submission_df = pd.DataFrame( {"Id": test_df["Id"], "0": 1 - test_preds, "1": test_preds} ) # Save the submission dataframe to a CSV file submission_df.to_csv("submission.csv", index=False) submission_df.head()
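A quick way to sanity-check the hand-rolled balanced_log_loss above is to compare it against sklearn's log_loss with class-balancing sample weights; the sketch below does that on toy arrays (the arrays and weighting scheme are illustrative assumptions, not competition data), and the two values should agree up to floating point. The import is aliased to avoid colliding with the log_loss variable the notebook assigns later.

# Sketch: cross-check balanced_log_loss against sklearn's log_loss with
# class-balancing sample weights (toy arrays, illustrative only).
import numpy as np
from sklearn.metrics import log_loss as sk_log_loss

y_true = np.array([0, 0, 0, 1, 1])
y_prob = np.array([0.1, 0.2, 0.3, 0.8, 0.6])

# Weight every sample by the inverse frequency of its class, so each class
# contributes equally to the weighted average loss.
counts = np.bincount(y_true)
weights = np.where(y_true == 1, 1.0 / counts[1], 1.0 / counts[0])

print("sklearn, class-weighted:", sk_log_loss(y_true, y_prob, sample_weight=weights))
print("hand-rolled            :", balanced_log_loss(y_true, y_prob))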
false
0
2,250
0
2,250
2,250
129524710
# ## Imports import os import gc import glob import json import multiprocessing as mp import warnings import albumentations as A import matplotlib.pyplot as plt import matplotlib.patches as patches import PIL.Image as Image import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as thd import segmentation_models_pytorch as smp from collections import defaultdict from types import SimpleNamespace from typing import Dict, List, Optional, Tuple from pathlib import Path from sklearn.metrics import fbeta_score from sklearn.exceptions import UndefinedMetricWarning from albumentations.pytorch import ToTensorV2 from segmentation_models_pytorch.encoders import get_preprocessing_fn from tqdm import tqdm warnings.simplefilter("ignore") # ## Config class CFG: # ============== comp exp name ============= comp_name = "vesuvius" comp_dir_path = "/kaggle/input" comp_folder_name = "vesuvius-challenge-ink-detection" comp_dataset_path = os.path.join(comp_dir_path, comp_folder_name) exp_name = "vesuvius_2d_slide_unet_exp001" # ============== pred target ============= target_size = 1 # ============== model cfg ============= model_name = "Unet" backbone = "efficientnet-b0" # backbone = 'se_resnext50_32x4d' in_chans = 10 # 65 # ============== data preprocessing ============= preprocess_input = get_preprocessing_fn(backbone, pretrained="imagenet") # ============== training cfg ============= size = 224 tile_size = 224 stride = tile_size // 2 train_batch_size = 32 # 32 valid_batch_size = train_batch_size use_amp = True scheduler = "GradualWarmupSchedulerV2" # scheduler = 'CosineAnnealingLR' epochs = 15 # 30 # adamW warmupあり warmup_factor = 10 # lr = 1e-3 / warmup_factor lr = 1e-3 # ============== fold ============= valid_id = 1 # objective_cv = 'binary' # 'binary', 'multiclass', 'regression' metric_direction = "maximize" # maximize, 'minimize' # metrics = 'dice_coef' # ============== fixed ============= pretrained = True inf_weight = "best" # 'best' min_lr = 1e-6 weight_decay = 1e-6 max_grad_norm = 1000 print_freq = 50 num_workers = 4 seed = 42 # ============== set dataset path ============= outputs_path = f"/kaggle/working/outputs/{comp_name}/{exp_name}/" submission_dir = outputs_path + "submissions/" submission_path = submission_dir + f"submission_{exp_name}.csv" model_dir = outputs_path + f"{comp_name}-models/" figures_dir = outputs_path + "figures/" log_dir = outputs_path + "logs/" log_path = log_dir + f"{exp_name}.txt" # ============== augmentation ============= train_aug_list = [ A.Resize(size, size), A.RandomBrightnessContrast(p=0.75), A.OneOf( [ A.GaussNoise(var_limit=[10, 50]), A.GaussianBlur(), A.MotionBlur(), ], p=0.4, ), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] valid_aug_list = [ A.Resize(size, size), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] # ## Set up data class SubvolumeDataset(thd.Dataset): def __init__(self, fragments: List[Path], transform=None): self.fragments = sorted(map(lambda path: path.resolve(), fragments)) self.transform = transform # Load sequentially image_stacks = [] labels = [] for fragment_id, fragment_path in enumerate(self.fragments): fragment_path = fragment_path.resolve() # absolute path print(fragment_path) images, label = self.read_image_mask(fragment_path) image_stack = np.stack(images, axis=0) image_stacks.append(image_stack) labels.append(label) print(f"Loaded fragment {fragment_path} on 
{os.getpid()}") self.labels = labels self.image_stacks = image_stacks def slice_fragment_to_subvolumes(self, images, mask): sliced_images = [] sliced_ink_masks = [] x1_list = list(range(0, images.shape[2] - CFG.tile_size + 1, CFG.stride)) y1_list = list(range(0, images.shape[1] - CFG.tile_size + 1, CFG.stride)) for y1 in y1_list: for x1 in x1_list: y2 = y1 + CFG.tile_size x2 = x1 + CFG.tile_size def read_image_mask(self, fragment_path): surface_volume_paths = sorted((fragment_path / "surface_volume").rglob("*.tif")) z_dim = CFG.in_chans z_mid = len(surface_volume_paths) // 2 z_start, z_end = z_mid - z_dim // 2, z_mid + z_dim // 2 # we don't convert to torch since it doesn't support uint16 images = [ np.array(Image.open(fn)) for fn in surface_volume_paths[z_start:z_end] ] pad0 = CFG.tile_size - images[0].shape[0] % CFG.tile_size pad1 = CFG.tile_size - images[0].shape[1] % CFG.tile_size images = np.pad(images, ((0, 0), (0, pad0), (0, pad1)), mode="constant") ink_mask = np.array( Image.open(str(fragment_path / "inklabels.png")).convert("1") ) ink_mask = np.pad(ink_mask, [(0, pad0), (0, pad1)], constant_values=0) ink_mask = ink_mask.astype("float32") ink_mask /= 255.0 return images, ink_mask def __len__(self): return len(self.image_stacks) def __getitem__(self, index): return self.image_stacks[index], self.labels[index] if self.transform: data = self.transform(image=subvolume, mask=inklabel) subvolume = data["image"] inklabel = data["mask"] # return torch.from_numpy(subvolume).unsqueeze(0), torch.FloatTensor([inklabel]) # return torch.from_numpy(subvolume), torch.FloatTensor([inklabel]) def plot_label(self, index, **kwargs): pixel = self.pixels[index] label = self.labels[pixel[-1]] print("Index:", index) print("Pixel:", pixel) print("Label:", int(label[pixel[0], pixel[1]])) if isinstance(label, torch.Tensor): label = label.numpy() fig, ax = plt.subplots(**kwargs) ax.imshow(label, cmap="gray") y, x, _ = pixel _, y_dim, x_dim = self.voxel_shape x_min = x - (x_dim // 2) x_max = x + (x_dim // 2) y_min = y - (y_dim // 2) y_max = y + (y_dim // 2) rect = plt.Rectangle( (x_min, y_min), x_dim, y_dim, linewidth=2, edgecolor="y", facecolor="none" ) ax.add_patch(rect) plt.show() base_path = Path("/kaggle/input/vesuvius-challenge/") train_path = base_path / "train" all_fragments = sorted([f.name for f in train_path.iterdir()]) print("All fragments:", all_fragments) train_fragments = [train_path / fragment_name for fragment_name in all_fragments] train_fragments train_dset = SubvolumeDataset( fragments=train_fragments, ) # transform=cfg.preprocess_input print("Num items (pixels)", len(train_dset)) # #### Sanity check index = 1 print(f"Sub Volume image shape = {train_dset[index][0].shape}") # train_dset.plot_label(index, figsize=(16, 10)) train_loader = thd.DataLoader(train_dset, batch_size=CFG.train_batch_size, shuffle=True) print("Num batches:", len(train_loader)) # ### Set up model DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") class InkDetector(torch.nn.Module): def __init__(self, cfg, weight=None): super().__init__() self.cfg = cfg self.model = smp.Unet( encoder_name=cfg.backbone, encoder_weights=weight, in_channels=cfg.in_chans, classes=cfg.target_size, activation=None, ) def forward(self, image): output = self.model(image) return output model = InkDetector(CFG, "imagenet").to(DEVICE) # ### Train TRAINING_STEPS = 1000 LEARNING_RATE = cfg.lr TRAIN_RUN = True # To avoid re-running when saving the notebook if TRAIN_RUN: criterion = nn.BCEWithLogitsLoss() optimizer = 
optim.SGD(model.parameters(), lr=LEARNING_RATE) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=LEARNING_RATE, total_steps=TRAINING_STEPS ) model.train() running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 pbar = tqdm(enumerate(train_loader), total=TRAINING_STEPS) for i, (subvolumes, inklabels) in pbar: if i >= TRAINING_STEPS: break optimizer.zero_grad() outputs = model(subvolumes.to(DEVICE)) print(f"outpus shape = {outputs.shape}") loss = criterion(outputs, inklabels.to(DEVICE)) loss.backward() optimizer.step() scheduler.step() pred_ink = outputs.detach().sigmoid().gt(0.4).cpu().int() accuracy = (pred_ink == inklabels).sum().float().div(inklabels.size(0)) running_fbeta += fbeta_score( inklabels.view(-1).numpy(), pred_ink.view(-1).numpy(), beta=0.5 ) running_accuracy += accuracy.item() running_loss += loss.item() denom += 1 pbar.set_postfix( { "Loss": running_loss / denom, "Accuracy": running_accuracy / denom, "[email protected]": running_fbeta / denom, } ) if (i + 1) % 500 == 0: running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 torch.save(model.state_dict(), "/kaggle/working/model.pt") else: model_weights = torch.load("/kaggle/working/model.pt") model.load_state_dict(model_weights) # ### Evaluate # Clear memory before loading test fragments train_dset.labels = None train_dset.image_stacks = [] del train_loader, train_dset gc.collect() test_path = base_path / "test" test_fragments = [train_path / fragment_name for fragment_name in test_path.iterdir()] print("All fragments:", test_fragments) pred_images = [] model.eval() for test_fragment in test_fragments: outputs = [] eval_dset = SubvolumeDataset( fragments=[test_fragment], voxel_shape=(48, 64, 64), load_inklabels=False ) eval_loader = thd.DataLoader(eval_dset, batch_size=BATCH_SIZE, shuffle=False) with torch.no_grad(): for i, (subvolumes, _) in enumerate(tqdm(eval_loader)): output = model(subvolumes.to(DEVICE)).view(-1).sigmoid().cpu().numpy() outputs.append(output) # we only load 1 fragment at a time image_shape = eval_dset.image_stacks[0].shape[1:] eval_dset.labels = None eval_dset.image_stacks = None del eval_loader gc.collect() pred_image = np.zeros(image_shape, dtype=np.uint8) outputs = np.concatenate(outputs) for (y, x, _), prob in zip(eval_dset.pixels[: outputs.shape[0]], outputs): pred_image[y, x] = prob > 0.4 pred_images.append(pred_image) eval_dset.pixels = None del eval_dset gc.collect() print("Finished", test_fragment) plt.imshow(pred_images[1], cmap="gray") # ### Submission def rle(output): flat_img = np.where(output > 0.4, 1, 0).astype(np.uint8) starts = np.array((flat_img[:-1] == 0) & (flat_img[1:] == 1)) ends = np.array((flat_img[:-1] == 1) & (flat_img[1:] == 0)) starts_ix = np.where(starts)[0] + 2 ends_ix = np.where(ends)[0] + 2 lengths = ends_ix - starts_ix return " ".join(map(str, sum(zip(starts_ix, lengths), ()))) submission = defaultdict(list) for fragment_id, fragment_name in enumerate(test_fragments): submission["Id"].append(fragment_name.name) submission["Predicted"].append(rle(pred_images[fragment_id])) pd.DataFrame.from_dict(submission).to_csv("/kaggle/working/submission.csv", index=False) pd.DataFrame.from_dict(submission)
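Since the submission depends entirely on rle(), a small round-trip check is worth having; below is a hedged decoder sketch that assumes 1-indexed run starts (the usual Kaggle convention, which matches the "+ 2" offset above for runs that do not begin at the very first pixel). Note that rle() as written will miss a run that starts at pixel 0, since it only looks at 0→1 transitions.

# Sketch: decode the run-length string produced by rle() above and round-trip
# a toy mask. Assumes 1-indexed run starts; illustrative only.
import numpy as np

def rle_decode(rle_string, length):
    flat = np.zeros(length, dtype=np.uint8)
    if rle_string:
        vals = list(map(int, rle_string.split()))
        for start, run in zip(vals[0::2], vals[1::2]):
            flat[start - 1 : start - 1 + run] = 1
    return flat

toy = np.array([0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)
encoded = rle(toy)  # rle() is defined in the notebook above
assert np.array_equal(rle_decode(encoded, toy.size), toy)
print(encoded)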
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/524/129524710.ipynb
null
null
[{"Id": 129524710, "ScriptId": 38514101, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11219911, "CreationDate": "05/14/2023 14:26:00", "VersionNumber": 1.0, "Title": "UNet Segmentataion [training]", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 416.0, "LinesInsertedFromPrevious": 416.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## Imports import os import gc import glob import json import multiprocessing as mp import warnings import albumentations as A import matplotlib.pyplot as plt import matplotlib.patches as patches import PIL.Image as Image import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as thd import segmentation_models_pytorch as smp from collections import defaultdict from types import SimpleNamespace from typing import Dict, List, Optional, Tuple from pathlib import Path from sklearn.metrics import fbeta_score from sklearn.exceptions import UndefinedMetricWarning from albumentations.pytorch import ToTensorV2 from segmentation_models_pytorch.encoders import get_preprocessing_fn from tqdm import tqdm warnings.simplefilter("ignore") # ## Config class CFG: # ============== comp exp name ============= comp_name = "vesuvius" comp_dir_path = "/kaggle/input" comp_folder_name = "vesuvius-challenge-ink-detection" comp_dataset_path = os.path.join(comp_dir_path, comp_folder_name) exp_name = "vesuvius_2d_slide_unet_exp001" # ============== pred target ============= target_size = 1 # ============== model cfg ============= model_name = "Unet" backbone = "efficientnet-b0" # backbone = 'se_resnext50_32x4d' in_chans = 10 # 65 # ============== data preprocessing ============= preprocess_input = get_preprocessing_fn(backbone, pretrained="imagenet") # ============== training cfg ============= size = 224 tile_size = 224 stride = tile_size // 2 train_batch_size = 32 # 32 valid_batch_size = train_batch_size use_amp = True scheduler = "GradualWarmupSchedulerV2" # scheduler = 'CosineAnnealingLR' epochs = 15 # 30 # adamW warmupあり warmup_factor = 10 # lr = 1e-3 / warmup_factor lr = 1e-3 # ============== fold ============= valid_id = 1 # objective_cv = 'binary' # 'binary', 'multiclass', 'regression' metric_direction = "maximize" # maximize, 'minimize' # metrics = 'dice_coef' # ============== fixed ============= pretrained = True inf_weight = "best" # 'best' min_lr = 1e-6 weight_decay = 1e-6 max_grad_norm = 1000 print_freq = 50 num_workers = 4 seed = 42 # ============== set dataset path ============= outputs_path = f"/kaggle/working/outputs/{comp_name}/{exp_name}/" submission_dir = outputs_path + "submissions/" submission_path = submission_dir + f"submission_{exp_name}.csv" model_dir = outputs_path + f"{comp_name}-models/" figures_dir = outputs_path + "figures/" log_dir = outputs_path + "logs/" log_path = log_dir + f"{exp_name}.txt" # ============== augmentation ============= train_aug_list = [ A.Resize(size, size), A.RandomBrightnessContrast(p=0.75), A.OneOf( [ A.GaussNoise(var_limit=[10, 50]), A.GaussianBlur(), A.MotionBlur(), ], p=0.4, ), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] valid_aug_list = [ A.Resize(size, size), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] # ## Set up data class SubvolumeDataset(thd.Dataset): def __init__(self, fragments: List[Path], transform=None): self.fragments = sorted(map(lambda path: path.resolve(), fragments)) self.transform = transform # Load sequentially image_stacks = [] labels = [] for fragment_id, fragment_path in enumerate(self.fragments): fragment_path = fragment_path.resolve() # absolute path print(fragment_path) images, label = self.read_image_mask(fragment_path) image_stack = np.stack(images, axis=0) image_stacks.append(image_stack) labels.append(label) print(f"Loaded fragment {fragment_path} on 
{os.getpid()}") self.labels = labels self.image_stacks = image_stacks def slice_fragment_to_subvolumes(self, images, mask): sliced_images = [] sliced_ink_masks = [] x1_list = list(range(0, images.shape[2] - CFG.tile_size + 1, CFG.stride)) y1_list = list(range(0, images.shape[1] - CFG.tile_size + 1, CFG.stride)) for y1 in y1_list: for x1 in x1_list: y2 = y1 + CFG.tile_size x2 = x1 + CFG.tile_size def read_image_mask(self, fragment_path): surface_volume_paths = sorted((fragment_path / "surface_volume").rglob("*.tif")) z_dim = CFG.in_chans z_mid = len(surface_volume_paths) // 2 z_start, z_end = z_mid - z_dim // 2, z_mid + z_dim // 2 # we don't convert to torch since it doesn't support uint16 images = [ np.array(Image.open(fn)) for fn in surface_volume_paths[z_start:z_end] ] pad0 = CFG.tile_size - images[0].shape[0] % CFG.tile_size pad1 = CFG.tile_size - images[0].shape[1] % CFG.tile_size images = np.pad(images, ((0, 0), (0, pad0), (0, pad1)), mode="constant") ink_mask = np.array( Image.open(str(fragment_path / "inklabels.png")).convert("1") ) ink_mask = np.pad(ink_mask, [(0, pad0), (0, pad1)], constant_values=0) ink_mask = ink_mask.astype("float32") ink_mask /= 255.0 return images, ink_mask def __len__(self): return len(self.image_stacks) def __getitem__(self, index): return self.image_stacks[index], self.labels[index] if self.transform: data = self.transform(image=subvolume, mask=inklabel) subvolume = data["image"] inklabel = data["mask"] # return torch.from_numpy(subvolume).unsqueeze(0), torch.FloatTensor([inklabel]) # return torch.from_numpy(subvolume), torch.FloatTensor([inklabel]) def plot_label(self, index, **kwargs): pixel = self.pixels[index] label = self.labels[pixel[-1]] print("Index:", index) print("Pixel:", pixel) print("Label:", int(label[pixel[0], pixel[1]])) if isinstance(label, torch.Tensor): label = label.numpy() fig, ax = plt.subplots(**kwargs) ax.imshow(label, cmap="gray") y, x, _ = pixel _, y_dim, x_dim = self.voxel_shape x_min = x - (x_dim // 2) x_max = x + (x_dim // 2) y_min = y - (y_dim // 2) y_max = y + (y_dim // 2) rect = plt.Rectangle( (x_min, y_min), x_dim, y_dim, linewidth=2, edgecolor="y", facecolor="none" ) ax.add_patch(rect) plt.show() base_path = Path("/kaggle/input/vesuvius-challenge/") train_path = base_path / "train" all_fragments = sorted([f.name for f in train_path.iterdir()]) print("All fragments:", all_fragments) train_fragments = [train_path / fragment_name for fragment_name in all_fragments] train_fragments train_dset = SubvolumeDataset( fragments=train_fragments, ) # transform=cfg.preprocess_input print("Num items (pixels)", len(train_dset)) # #### Sanity check index = 1 print(f"Sub Volume image shape = {train_dset[index][0].shape}") # train_dset.plot_label(index, figsize=(16, 10)) train_loader = thd.DataLoader(train_dset, batch_size=CFG.train_batch_size, shuffle=True) print("Num batches:", len(train_loader)) # ### Set up model DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") class InkDetector(torch.nn.Module): def __init__(self, cfg, weight=None): super().__init__() self.cfg = cfg self.model = smp.Unet( encoder_name=cfg.backbone, encoder_weights=weight, in_channels=cfg.in_chans, classes=cfg.target_size, activation=None, ) def forward(self, image): output = self.model(image) return output model = InkDetector(CFG, "imagenet").to(DEVICE) # ### Train TRAINING_STEPS = 1000 LEARNING_RATE = cfg.lr TRAIN_RUN = True # To avoid re-running when saving the notebook if TRAIN_RUN: criterion = nn.BCEWithLogitsLoss() optimizer = 
optim.SGD(model.parameters(), lr=LEARNING_RATE) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=LEARNING_RATE, total_steps=TRAINING_STEPS ) model.train() running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 pbar = tqdm(enumerate(train_loader), total=TRAINING_STEPS) for i, (subvolumes, inklabels) in pbar: if i >= TRAINING_STEPS: break optimizer.zero_grad() outputs = model(subvolumes.to(DEVICE)) print(f"outpus shape = {outputs.shape}") loss = criterion(outputs, inklabels.to(DEVICE)) loss.backward() optimizer.step() scheduler.step() pred_ink = outputs.detach().sigmoid().gt(0.4).cpu().int() accuracy = (pred_ink == inklabels).sum().float().div(inklabels.size(0)) running_fbeta += fbeta_score( inklabels.view(-1).numpy(), pred_ink.view(-1).numpy(), beta=0.5 ) running_accuracy += accuracy.item() running_loss += loss.item() denom += 1 pbar.set_postfix( { "Loss": running_loss / denom, "Accuracy": running_accuracy / denom, "[email protected]": running_fbeta / denom, } ) if (i + 1) % 500 == 0: running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 torch.save(model.state_dict(), "/kaggle/working/model.pt") else: model_weights = torch.load("/kaggle/working/model.pt") model.load_state_dict(model_weights) # ### Evaluate # Clear memory before loading test fragments train_dset.labels = None train_dset.image_stacks = [] del train_loader, train_dset gc.collect() test_path = base_path / "test" test_fragments = [train_path / fragment_name for fragment_name in test_path.iterdir()] print("All fragments:", test_fragments) pred_images = [] model.eval() for test_fragment in test_fragments: outputs = [] eval_dset = SubvolumeDataset( fragments=[test_fragment], voxel_shape=(48, 64, 64), load_inklabels=False ) eval_loader = thd.DataLoader(eval_dset, batch_size=BATCH_SIZE, shuffle=False) with torch.no_grad(): for i, (subvolumes, _) in enumerate(tqdm(eval_loader)): output = model(subvolumes.to(DEVICE)).view(-1).sigmoid().cpu().numpy() outputs.append(output) # we only load 1 fragment at a time image_shape = eval_dset.image_stacks[0].shape[1:] eval_dset.labels = None eval_dset.image_stacks = None del eval_loader gc.collect() pred_image = np.zeros(image_shape, dtype=np.uint8) outputs = np.concatenate(outputs) for (y, x, _), prob in zip(eval_dset.pixels[: outputs.shape[0]], outputs): pred_image[y, x] = prob > 0.4 pred_images.append(pred_image) eval_dset.pixels = None del eval_dset gc.collect() print("Finished", test_fragment) plt.imshow(pred_images[1], cmap="gray") # ### Submission def rle(output): flat_img = np.where(output > 0.4, 1, 0).astype(np.uint8) starts = np.array((flat_img[:-1] == 0) & (flat_img[1:] == 1)) ends = np.array((flat_img[:-1] == 1) & (flat_img[1:] == 0)) starts_ix = np.where(starts)[0] + 2 ends_ix = np.where(ends)[0] + 2 lengths = ends_ix - starts_ix return " ".join(map(str, sum(zip(starts_ix, lengths), ()))) submission = defaultdict(list) for fragment_id, fragment_name in enumerate(test_fragments): submission["Id"].append(fragment_name.name) submission["Predicted"].append(rle(pred_images[fragment_id])) pd.DataFrame.from_dict(submission).to_csv("/kaggle/working/submission.csv", index=False) pd.DataFrame.from_dict(submission)
false
0
3,613
0
3,613
3,613
129553692
<jupyter_start><jupyter_text>ISMI_Group3_PANDA_36_256_256_res1_tiles This dataset is a preprocessed provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles, of 256 x 256 pixels. The tiles are taken from the medium resolution. Kaggle dataset identifier: ismi-group3-panda-36-256-256-res1-tiles <jupyter_script>import numpy as np import torch import pytorch_lightning as pl from torch import nn import os import pandas as pd import matplotlib.pyplot as plt import torchvision import torch.nn.functional as F from torchsummary import summary class EfficientNetModule(pl.LightningModule): def __init__(self, num_classes=6): super().__init__() self.model = torch.hub.load( "NVIDIA/DeepLearningExamples:torchhub", "nvidia_efficientnet_b0", pretrained=True, ) # Replace last layer with a 6 class output layer self.model.classifier.fc = torch.nn.Linear( in_features=self.model.classifier.fc.in_features, out_features=num_classes, bias=True, ) self.train_losses = [] self.val_losses = [] self.epoch_train_losses = [] self.epoch_val_losses = [] def forward(self, x): x = self.model(x) return x def training_step(self, batch, batch_idx): x, y = batch x = x.float() y_hat = self.model(x) loss = F.cross_entropy(y_hat, y) self.epoch_train_losses.append(loss) return loss def validation_step(self, batch, batch_idx): x, y = batch x = x.float() y_hat = self.model(x) loss = F.cross_entropy(y_hat, y) self.epoch_val_losses.append(loss) return loss def on_epoch_end(self): print(f"Train loss: {average(self.epoch_train_losses)}") print(f"Validation loss: {average(self.epoch_val_losses)}") self.train_losses.append(average(self.epoch_train_losses)) self.val_losses.append(average(self.epoch_val_losses)) self.epoch_train_losses = [] self.epoch_val_losses = [] def configure_optimizers(self): optimizer = torch.optim.Adam(self.model.classifier.fc.parameters()) return optimizer def average(self, lst): return sum(lst) / len(lst) class PANDATilesDataModule(pl.LightningModule): def __init__(self, batch_size: int = 16, stage: str = "train"): super().__init__() self.batch_size = batch_size self.data_dir = ( "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiles/tiles" ) # self.transform = transforms.Compose([transforms.ToTensor()]) train_csv = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/train.csv" ) if stage == "train": self.data = train_csv[:9000] elif stage == "val": self.data = train_csv[9000:] elif stage == "test": self.data = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/test.csv" ) else: print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test") self.data = train_csv # Returns the image and ISUP label def __getitem__(self, idx): item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image item_dir = os.path.join(self.data_dir, item_name + "/tile_0.png") return ( torchvision.io.read_image(item_dir), self.data.iloc[idx].loc["isup_grade"], ) def __len__(self): return len(self.data) class PANDADataModule(pl.LightningModule): def __init__(self, batch_size: int = 16, stage: str = "train"): super().__init__() self.batch_size = batch_size self.data_dir = "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiled_images/tiled_images" # self.transform = transforms.Compose([transforms.ToTensor()]) train_csv = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/train.csv" ) if stage == "train": self.data = train_csv[:9000] elif stage == "val": self.data = train_csv[9000:] 
elif stage == "test": self.data = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/test.csv" ) else: print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test") self.data = train_csv # Returns the image and ISUP label def __getitem__(self, idx): item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image item_dir = os.path.join(self.data_dir, item_name + "_tiled.png") return ( torchvision.io.read_image(item_dir), self.data.iloc[idx].loc["isup_grade"], ) def __len__(self): return len(self.data) batch_size = 16 n_epochs = 2 # pass stage as a keyword argument: the first positional parameter is batch_size train_data = PANDATilesDataModule(stage="train") train_loader = torch.utils.data.DataLoader( train_data, batch_size=batch_size, shuffle=True ) val_data = PANDATilesDataModule(stage="val") val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False) EfficientNet = EfficientNetModule() train_data.__getitem__(0)[0].shape summary(EfficientNet, (3, 256, 256)) # trainer = pl.Trainer(limit_train_batches=100, max_epochs=n_epochs, accelerator='gpu') # trainer.fit(model=EfficientNet, train_dataloaders=train_loader, val_dataloaders=val_loader)
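The two data classes above subclass pl.LightningModule even though they are only ever used as map-style datasets; a minimal sketch of the same tile reader as a plain torch.utils.data.Dataset is shown below. Paths and column names mirror the code above, while the class name and split handling are illustrative assumptions, not a tested drop-in replacement.

# Sketch: the tile reader as a plain torch Dataset instead of a LightningModule
# subclass. Paths/columns mirror the code above; the rest is an assumption.
import os
import pandas as pd
import torchvision
from torch.utils.data import Dataset

class PandaTileDataset(Dataset):
    def __init__(self, csv_path, tiles_dir, start=0, end=None):
        self.data = pd.read_csv(csv_path).iloc[start:end]
        self.tiles_dir = tiles_dir

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]
        path = os.path.join(self.tiles_dir, row["image_id"], "tile_0.png")
        return torchvision.io.read_image(path).float(), int(row["isup_grade"])

# e.g. train split: PandaTileDataset(train_csv_path, tiles_dir, end=9000)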
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553692.ipynb
ismi-group3-panda-36-256-256-res1-tiles
florisvanwettum
[{"Id": 129553692, "ScriptId": 38516756, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2476129, "CreationDate": "05/14/2023 19:19:45", "VersionNumber": 1.0, "Title": "Group3_ISMI_Baseline_Floris", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185731578, "KernelVersionId": 129553692, "SourceDatasetVersionId": 5682352}]
[{"Id": 5682352, "DatasetId": 3228105, "DatasourceVersionId": 5757916, "CreatorUserId": 2476129, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:36:05", "VersionNumber": 5.0, "Title": "ISMI_Group3_PANDA_36_256_256_res1_tiles", "Slug": "ismi-group3-panda-36-256-256-res1-tiles", "Subtitle": "Medium resolution 36 256x256 tiles per sample, individual and combined images.", "Description": "This dataset is a preprocessed provides a preprocessed version of the [PANDA](https://www.kaggle.com/competitions/prostate-cancer-grade-assessment) challenge. Each sample has 36 tiles, of 256 x 256 pixels. The tiles are taken from the medium resolution.", "VersionNotes": "Added the last sample of the train.csv to the tiled_images", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3228105, "CreatorUserId": 2476129, "OwnerUserId": 2476129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682352.0, "CurrentDatasourceVersionId": 5757916.0, "ForumId": 3293216, "Type": 2, "CreationDate": "05/05/2023 21:28:46", "LastActivityDate": "05/05/2023", "TotalViews": 99, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 4}]
[{"Id": 2476129, "UserName": "florisvanwettum", "DisplayName": "Florijs", "RegisterDate": "11/10/2018", "PerformanceTier": 0}]
import numpy as np import torch import pytorch_lightning as pl from torch import nn import os import pandas as pd import matplotlib.pyplot as plt import torchvision import torch.nn.functional as F from torchsummary import summary class EfficientNetModule(pl.LightningModule): def __init__(self, num_classes=6): super().__init__() self.model = torch.hub.load( "NVIDIA/DeepLearningExamples:torchhub", "nvidia_efficientnet_b0", pretrained=True, ) # Replace last layer with a 6 class output layer self.model.classifier.fc = torch.nn.Linear( in_features=self.model.classifier.fc.in_features, out_features=num_classes, bias=True, ) self.train_losses = [] self.val_losses = [] self.epoch_train_losses = [] self.epoch_val_losses = [] def forward(self, x): x = self.model(x) return x def training_step(self, batch, batch_idx): x, y = batch x = x.float() y_hat = self.model(x) loss = F.cross_entropy(y_hat, y) self.epoch_train_losses.append(loss) return loss def validation_step(self, batch, batch_idx): x, y = batch x = x.float() y_hat = self.model(x) loss = F.cross_entropy(y_hat, y) self.epoch_val_losses.append(loss) return loss def on_epoch_end(self): print(f"Train loss: {average(self.epoch_train_losses)}") print(f"Validation loss: {average(self.epoch_val_losses)}") self.train_losses.append(average(self.epoch_train_losses)) self.val_losses.append(average(self.epoch_val_losses)) self.epoch_train_losses = [] self.epoch_val_losses = [] def configure_optimizers(self): optimizer = torch.optim.Adam(self.model.classifier.fc.parameters()) return optimizer def average(self, lst): return sum(lst) / len(lst) class PANDATilesDataModule(pl.LightningModule): def __init__(self, batch_size: int = 16, stage: str = "train"): super().__init__() self.batch_size = batch_size self.data_dir = ( "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiles/tiles" ) # self.transform = transforms.Compose([transforms.ToTensor()]) train_csv = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/train.csv" ) if stage == "train": self.data = train_csv[:9000] elif stage == "val": self.data = train_csv[9000:] elif stage == "test": self.data = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/test.csv" ) else: print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test") self.data = train_csv # Returns the image and ISUP label def __getitem__(self, idx): item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image item_dir = os.path.join(self.data_dir, item_name + "/tile_0.png") return ( torchvision.io.read_image(item_dir), self.data.iloc[idx].loc["isup_grade"], ) def __len__(self): return len(self.data) class PANDADataModule(pl.LightningModule): def __init__(self, batch_size: int = 16, stage: str = "train"): super().__init__() self.batch_size = batch_size self.data_dir = "/kaggle/input/ismi-group3-panda-36-256-256-res1-tiles/tiled_images/tiled_images" # self.transform = transforms.Compose([transforms.ToTensor()]) train_csv = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/train.csv" ) if stage == "train": self.data = train_csv[:9000] elif stage == "val": self.data = train_csv[9000:] elif stage == "test": self.data = pd.read_csv( "/kaggle/input/prostate-cancer-grade-assessment/test.csv" ) else: print("ERROR: YOUR STAGE INPUT WAS NOT RECOGNISED AS EITHER train/val/test") self.data = train_csv # Returns the image and ISUP label def __getitem__(self, idx): item_name = self.data.iloc[idx].loc["image_id"] ## Get the name of the image item_dir = os.path.join(self.data_dir, item_name + 
"_tiled.png") return ( torchvision.io.read_image(item_dir), self.data.iloc[idx].loc["isup_grade"], ) def __len__(self): return len(self.data) batch_size = 16 n_epochs = 2 train_data = PANDATilesDataModule("train") train_loader = torch.utils.data.DataLoader( train_data, batch_size=batch_size, shuffle=True ) val_data = PANDATilesDataModule("val") val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False) EfficientNet = EfficientNetModule() train_data.__getitem__(0)[0].shape summary(EfficientNet, (3, 256, 256)) # trainer = pl.Trainer(limit_train_batches=100, max_epochs=n_epochs, accelerator='gpu') # trainer.fit(model=EfficientNet, train_dataloaders=train_loader, val_dataloaders=val_loader)
false
2
1,529
0
1,664
1,529
129553544
<jupyter_start><jupyter_text>Parkinson's Disease Dataset ### Context Try finding the reasons for Parkinsons disease and predict who might have it next! Kaggle dataset identifier: parkinsonsdataset <jupyter_script># Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Importing The Libraries import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd from sklearn.metrics import confusion_matrix, accuracy_score from xgboost import XGBClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split, KFold, cross_val_score from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.neighbors import KNeighborsClassifier # importing th dataset df = pd.read_csv("/kaggle/input/parkinsonsdataset/parkinsons.csv") # # Analysis of Data df.head() df.tail() X = df.drop(["name", "status"], axis=1) # name not needed Y = df["status"] # output variable # checking any irregularities,unwanted or empty data df.info() df.describe() # different central tendencies of the data df.isnull().sum() # empty values # There are no empty variables in the data features = [feature for feature in X] features # checking for any outliers in the data for feature in features: sns.boxplot(x="status", y=feature, data=df) plt.show() # Some of the above data have outliers so we need to take steps for that # heatmap correlation = df.corr() sns.heatmap(correlation) # # Splitting Training Set and Test X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2) # # Model model = ExtraTreesClassifier() model.fit(X, Y) random_features = pd.Series(model.feature_importances_, index=X.columns) random_features.nlargest(4).plot(kind="barh") selected_feat = ["PPE", "MDVP:Fo(Hz)", "spread1", "spread2"] # putting only the selected features from the training set and test set X_train = X_train[selected_feat].values X_test = X_test[selected_feat].values y_train = y_train.values y_test = y_test.values # scaling the data sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # **XGBoost Analysis** # using the xgboost model in the data xgb = XGBClassifier() xgb.fit(X_train, y_train) y_pred1 = xgb.predict(X_test) cm1 = confusion_matrix(y_test, y_pred1) print(cm1) print(accuracy_score(y_test, y_pred1)) print(accuracy_score(y_train, xgb.predict(X_train))) from sklearn.model_selection import KFold, cross_val_score kf = KFold(10) results = cross_val_score(xgb, X, Y, cv=kf) np.mean(results) # **Random Forresting Analysis** # using random forest for classification rf = RandomForestClassifier() rf.fit(X_train, y_train) y_pred2 = rf.predict(X_test) cm2 = confusion_matrix(y_test, y_pred2) print(cm2) print(accuracy_score(y_test, y_pred2)) print(accuracy_score(y_train, rf.predict(X_train))) kf = KFold(10) results = cross_val_score(rf, X, Y, cv=kf) np.mean(results) # **Logistical Regression** lr = LogisticRegression() lr.fit(X_train, y_train) 
y_pred = lr.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) print(accuracy_score(y_train, lr.predict(X_train))) kf = KFold(10) results = cross_val_score(lr, X, Y, cv=kf) np.mean(results) # **K Nearest Neighbours** knn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) print(accuracy_score(y_train, knn.predict(X_train))) kf = KFold(10) results = cross_val_score(knn, X, Y, cv=kf) np.mean(results)
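One caveat with the cross_val_score calls above: they receive the raw, unscaled feature matrix X, while the fitted models saw standardized values of only the four selected features, so the CV scores do not measure quite the same model. A hedged sketch of leakage-free cross-validation, assuming the same four selected features, wraps the scaler and classifier in a Pipeline so each fold is standardized on its own training split:

# Sketch: cross-validate scaling + classifier together so each fold scales on
# its own training split (same four features as above; illustrative only).
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, cross_val_score

pipe = Pipeline([
    ("scale", StandardScaler()),
    ("knn", KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)),
])
scores = cross_val_score(pipe, X[selected_feat], Y, cv=KFold(10, shuffle=True, random_state=42))
print(scores.mean())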
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553544.ipynb
parkinsonsdataset
gargmanas
[{"Id": 129553544, "ScriptId": 36429996, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12843112, "CreationDate": "05/14/2023 19:18:04", "VersionNumber": 10.0, "Title": "Parkinsons Disease Prediction", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 46.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 106.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185731245, "KernelVersionId": 129553544, "SourceDatasetVersionId": 2172562}]
[{"Id": 2172562, "DatasetId": 1304147, "DatasourceVersionId": 2213865, "CreatorUserId": 4631534, "LicenseName": "GNU Free Documentation License 1.3", "CreationDate": "04/29/2021 08:15:50", "VersionNumber": 1.0, "Title": "Parkinson's Disease Dataset", "Slug": "parkinsonsdataset", "Subtitle": "Use the dataset to analyze it and detect Parkinson's disease", "Description": "### Context\n\nTry finding the reasons for Parkinsons disease and predict who might have it next!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1304147, "CreatorUserId": 4631534, "OwnerUserId": 4631534.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2172562.0, "CurrentDatasourceVersionId": 2213865.0, "ForumId": 1322920, "Type": 2, "CreationDate": "04/29/2021 08:15:50", "LastActivityDate": "04/29/2021", "TotalViews": 15563, "TotalDownloads": 1911, "TotalVotes": 67, "TotalKernels": 15}]
[{"Id": 4631534, "UserName": "gargmanas", "DisplayName": "SHINIGAMI", "RegisterDate": "03/08/2020", "PerformanceTier": 3}]
# Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Importing The Libraries import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd from sklearn.metrics import confusion_matrix, accuracy_score from xgboost import XGBClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split, KFold, cross_val_score from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.neighbors import KNeighborsClassifier # importing th dataset df = pd.read_csv("/kaggle/input/parkinsonsdataset/parkinsons.csv") # # Analysis of Data df.head() df.tail() X = df.drop(["name", "status"], axis=1) # name not needed Y = df["status"] # output variable # checking any irregularities,unwanted or empty data df.info() df.describe() # different central tendencies of the data df.isnull().sum() # empty values # There are no empty variables in the data features = [feature for feature in X] features # checking for any outliers in the data for feature in features: sns.boxplot(x="status", y=feature, data=df) plt.show() # Some of the above data have outliers so we need to take steps for that # heatmap correlation = df.corr() sns.heatmap(correlation) # # Splitting Training Set and Test X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2) # # Model model = ExtraTreesClassifier() model.fit(X, Y) random_features = pd.Series(model.feature_importances_, index=X.columns) random_features.nlargest(4).plot(kind="barh") selected_feat = ["PPE", "MDVP:Fo(Hz)", "spread1", "spread2"] # putting only the selected features from the training set and test set X_train = X_train[selected_feat].values X_test = X_test[selected_feat].values y_train = y_train.values y_test = y_test.values # scaling the data sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # **XGBoost Analysis** # using the xgboost model in the data xgb = XGBClassifier() xgb.fit(X_train, y_train) y_pred1 = xgb.predict(X_test) cm1 = confusion_matrix(y_test, y_pred1) print(cm1) print(accuracy_score(y_test, y_pred1)) print(accuracy_score(y_train, xgb.predict(X_train))) from sklearn.model_selection import KFold, cross_val_score kf = KFold(10) results = cross_val_score(xgb, X, Y, cv=kf) np.mean(results) # **Random Forresting Analysis** # using random forest for classification rf = RandomForestClassifier() rf.fit(X_train, y_train) y_pred2 = rf.predict(X_test) cm2 = confusion_matrix(y_test, y_pred2) print(cm2) print(accuracy_score(y_test, y_pred2)) print(accuracy_score(y_train, rf.predict(X_train))) kf = KFold(10) results = cross_val_score(rf, X, Y, cv=kf) np.mean(results) # **Logistical Regression** lr = LogisticRegression() lr.fit(X_train, y_train) y_pred = lr.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) print(accuracy_score(y_train, lr.predict(X_train))) kf = KFold(10) results = cross_val_score(rf, X, 
Y, cv=kf) np.mean(results) # **K Nearest Neighbours** knn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) print(accuracy_score(y_train, knn.predict(X_train))) kf = KFold(10) results = cross_val_score(knn, X, Y, cv=kf) np.mean(results)
false
1
1,307
0
1,355
1,307
129553823
<jupyter_start><jupyter_text>GitHub Repositories 2020 ## Context **You can star repositories to keep track of projects you find interesting.** I have Scraped top stared repositories from GitHub with different topics. I have used Python BeautifulSoup to scrape the data. The main motivation behind this data is to analyze top GitHub stared repositories. I have selected some topics like Data-Science, Machine-Learning, Computer-Vision, etc. Then I have watched most stared 100 repository details including repository commits, issue, fork, etc. ## GitHub ## Content There are more than **1000** repository nformation. Data contains the main 19 columns: 1) **topic**: A base word with the help of its fetched repository. 2) **name**: repository name. 3) **user**: repository user name. 4) **star**: stars are given by users. 5) **fork**: number of the fork that specific repository. 6) **watch**: repository watch 7) **issue**: number of issue in that repository. 8) **pull_requests**: number of pull requests 9) **projects**: a number of projects undergoing that topic_tag. 10) **topic_tag**: tag added to the repository by the user. 11) **discription_text**: short discription added by user. 12) **discription_url**: additional url provide by repository. 13) **commits**: number of commits to that repository. 14) **branches**: a number of different branches of the repository. 15) **packages**: number of packages. 16) **releases**: releases of the repository. 17) **contributors**: a number of users have contributed to the repository. 18) **License**: name of License. 19) **url**: URL of the repository. **current repository topics**: Data-Science, Machine-Learning, Open-CV, Computer-Vision, GAN, variational-encoder, Android-studio, flutter, JAVA, awesome, javascript, c++ **stay tuned for more topics.** Kaggle dataset identifier: github-repositories-analysis <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Here, i have done ` Data Analsys ` part of popular Github Repositary. 
import pandas as pd data = pd.read_csv("/kaggle/input/github-repositories-analysis/Github_data.csv") data.sample(5) del_list = ["Unnamed: 0", "Unnamed: 0.1"] data.drop(del_list, axis=1, inplace=True) data.sample(5) data.head() data.info() oolumns_name = data.columns oolumns_name # Rename the columns new_column = [ "Topic", "Name", "User", "Star", "Fork", "Watch", "Issue", "Pull_requests", "Projects", "Topic_tag", "Discription_text", "Discription_url", "Commits", "Branches", "Packages", "Releases", "Contributers", "License", "Url", ] data = data.rename(columns=dict(zip(oolumns_name, new_column))) data.head(5) data["Topic"].value_counts().plot(kind="bar") data["License"].value_counts() data[data["License"] == "BSD-3-Clause"] data.info() data["Star"] = data["Star"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data["Fork"] = data["Fork"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data["Watch"] = data["Watch"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data.head(5) data["Issue"] = data["Issue"].apply(lambda x: x.replace(",", "") if "," in x else x) data["Commits"] = data["Commits"].apply(lambda x: x.replace(",", "") if "," in x else x) data.head() data.info() cols = ["Issue", "Pull_requests", "Commits", "Contributers"] data[cols] = data[cols].apply(pd.to_numeric, errors="coerce", axis=1) data.dtypes data.describe() new_data = data.groupby("Topic").mean().reset_index() new_data # ## Which repo and topic has highest no. of stars? star_df = new_data.sort_values(by=["Star"], ascending=False) star_df import matplotlib.pyplot as plt plt.bar(star_df["Topic"], star_df["Star"], color="blue", width=0.4) plt.xticks(rotation=90) star_data = data.sort_values(by=["Star"], ascending=False) star_data
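The three near-identical lambdas above all implement the same "1.2k" → 1200.0 conversion; a small helper capturing that rule is sketched below. The "m" branch and the comma stripping are assumptions for values not shown in this data, which only exhibits plain numbers and a "k" suffix.

# Sketch: one helper for the repeated "k"-suffix conversion; the "m" branch and
# comma stripping are assumptions for values not shown above.
def parse_count(value):
    s = str(value).strip().lower().replace(",", "")
    if s.endswith("k"):
        return float(s[:-1]) * 1_000
    if s.endswith("m"):
        return float(s[:-1]) * 1_000_000
    return float(s)

# could replace the three Star/Fork/Watch lambdas above, e.g.:
# data["Star"] = data["Star"].apply(parse_count)
print(parse_count("1.2k"), parse_count("3,405"))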
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553823.ipynb
github-repositories-analysis
vatsalparsaniya
[{"Id": 129553823, "ScriptId": 38511450, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11869129, "CreationDate": "05/14/2023 19:21:36", "VersionNumber": 1.0, "Title": "Data Analysis of Github Repo", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185731806, "KernelVersionId": 129553823, "SourceDatasetVersionId": 1106182}]
[{"Id": 1106182, "DatasetId": 619495, "DatasourceVersionId": 1136340, "CreatorUserId": 2907842, "LicenseName": "Unknown", "CreationDate": "04/24/2020 20:21:04", "VersionNumber": 1.0, "Title": "GitHub Repositories 2020", "Slug": "github-repositories-analysis", "Subtitle": "GitHub Top stared Repositories of specific Domain (1200+)", "Description": "## Context \n\n**You can star repositories to keep track of projects you find interesting.**\nI have Scraped top stared repositories from GitHub with different topics. I have used Python BeautifulSoup to scrape the data. The main motivation behind this data is to analyze top GitHub stared repositories.\n\nI have selected some topics like Data-Science, Machine-Learning, Computer-Vision, etc. Then I have watched most stared 100 repository details including repository commits, issue, fork, etc.\n \n## GitHub \n\n## Content\nThere are more than **1000** repository nformation.\n\nData contains the main 19 columns:\n1) **topic**: A base word with the help of its fetched repository.\n2) **name**: repository name.\n3) **user**: repository user name.\n4) **star**: stars are given by users.\n5) **fork**: number of the fork that specific repository.\n6) **watch**: repository watch\n7) **issue**: number of issue in that repository.\n8) **pull_requests**: number of pull requests \n9) **projects**: a number of projects undergoing that topic_tag.\n10) **topic_tag**: tag added to the repository by the user.\n11) **discription_text**: short discription added by user.\n12) **discription_url**: additional url provide by repository.\n13) **commits**: number of commits to that repository.\n14) **branches**: a number of different branches of the repository.\n15) **packages**: number of packages.\n16) **releases**: releases of the repository.\n17) **contributors**: a number of users have contributed to the repository.\n18) **License**: name of License.\n19) **url**: URL of the repository.\n\n**current repository topics**: Data-Science, Machine-Learning, Open-CV, Computer-Vision, GAN, variational-encoder, Android-studio, flutter, JAVA, awesome, javascript, c++\n\n**stay tuned for more topics.**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 619495, "CreatorUserId": 2907842, "OwnerUserId": 2907842.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1106182.0, "CurrentDatasourceVersionId": 1136340.0, "ForumId": 633624, "Type": 2, "CreationDate": "04/24/2020 20:21:04", "LastActivityDate": "04/24/2020", "TotalViews": 7978, "TotalDownloads": 545, "TotalVotes": 20, "TotalKernels": 7}]
[{"Id": 2907842, "UserName": "vatsalparsaniya", "DisplayName": "Vatsal Parsaniya", "RegisterDate": "03/07/2019", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Here, i have done ` Data Analsys ` part of popular Github Repositary. import pandas as pd data = pd.read_csv("/kaggle/input/github-repositories-analysis/Github_data.csv") data.sample(5) del_list = ["Unnamed: 0", "Unnamed: 0.1"] data.drop(del_list, axis=1, inplace=True) data.sample(5) data.head() data.info() oolumns_name = data.columns oolumns_name # Rename the columns new_column = [ "Topic", "Name", "User", "Star", "Fork", "Watch", "Issue", "Pull_requests", "Projects", "Topic_tag", "Discription_text", "Discription_url", "Commits", "Branches", "Packages", "Releases", "Contributers", "License", "Url", ] data = data.rename(columns=dict(zip(oolumns_name, new_column))) data.head(5) data["Topic"].value_counts().plot(kind="bar") data["License"].value_counts() data[data["License"] == "BSD-3-Clause"] data.info() data["Star"] = data["Star"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data["Fork"] = data["Fork"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data["Watch"] = data["Watch"].apply( lambda x: float(x.rstrip("k")) * 1000 if x.endswith("k") else float(x) ) data.head(5) data["Issue"] = data["Issue"].apply(lambda x: x.replace(",", "") if "," in x else x) data["Commits"] = data["Commits"].apply(lambda x: x.replace(",", "") if "," in x else x) data.head() data.info() cols = ["Issue", "Pull_requests", "Commits", "Contributers"] data[cols] = data[cols].apply(pd.to_numeric, errors="coerce", axis=1) data.dtypes data.describe() new_data = data.groupby("Topic").mean().reset_index() new_data # ## Which repo and topic has highest no. of stars? star_df = new_data.sort_values(by=["Star"], ascending=False) star_df import matplotlib.pyplot as plt plt.bar(star_df["Topic"], star_df["Star"], color="blue", width=0.4) plt.xticks(rotation=90) star_data = data.sort_values(by=["Star"], ascending=False) star_data
false
1
869
0
1,349
869
129553542
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder url = "/kaggle/input/demand-forecasting-kernels-only/train.csv" df_train = pd.read_csv(url) df_train["date"] = pd.to_datetime(df_train["date"]) df_train.head() # load test set url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv" df_test = pd.read_csv(url2) df_test["date"] = pd.to_datetime(df_test["date"]) df_test.head() # Concatenate the training and testing dataframes df_combined = pd.concat([df_train, df_test]).reset_index(drop=True) import plotly.express as px # Downsample the data by month and calculate the mean sales for each month df_downsampled = df_train.resample("M", on="date").mean() # Create a line plot using Plotly Express fig = px.line( df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time" ) # Display the plot fig.show() # Calculate SMAPE def smape(preds, target): n = len(preds) masked_arr = ~((preds == 0) & (target == 0)) preds, target = preds[masked_arr], target[masked_arr] num = np.abs(preds - target) denom = np.abs(preds) + np.abs(target) smape_val = (200 * np.sum(num / denom)) / n return smape_val import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # Feature Engineering - Categorical df_combined["day_of_week"] = df_combined["date"].dt.dayofweek df_combined["month"] = df_combined["date"].dt.month df_combined["year"] = df_combined["date"].dt.year # df['week'] = df['date'].dt.week df_combined["day_of_year"] = df_combined["date"].dt.dayofyear df_combined["week_of_year"] = df_combined["date"].dt.weekofyear # df_combined['sin_day_of_week'] = np.sin(2*np.pi*df_combined['day_of_week']/7) # df_combined['cos_day_of_week'] = np.cos(2*np.pi*df_combined['day_of_week']/7) # Encode categorical features le_item = LabelEncoder() le_store = LabelEncoder() df_combined["item"] = le_item.fit_transform(df_combined["item"]) df_combined["store"] = le_store.fit_transform(df_combined["store"]) # Create dummy variables for day_of_week day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week") df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1) # create a new dataframe to hold the dummy variables # Create dummy variables for month month_dummies = pd.get_dummies(df_combined["month"], prefix="month") df_combined = pd.concat([df_combined, month_dummies], axis=1) # Create dummy variables for year year_dummies = pd.get_dummies(df_combined["year"], prefix="year") df_combined = pd.concat([df_combined, year_dummies], axis=1) # # Drop rows with NaN values # df = df.dropna() df_combined = df_combined.drop( ["month", "year", "day_of_year", "week_of_year", "day_of_week"], 
axis=1 ) # Separate your training and testing dataframes again df_train = df_combined[df_combined["sales"].notna()] df_test = df_combined[df_combined["sales"].isna()] column_list = df_combined.columns.tolist() print(column_list) df_train = df_train.drop("id", axis=1) df_train = df_train.dropna() df_train from sklearn.model_selection import TimeSeriesSplit from lightgbm import LGBMRegressor # Number of splits n_splits = 5 # Initialize TimeSeriesSplit tscv = TimeSeriesSplit(n_splits=n_splits) model = LGBMRegressor() df_fc = df_train.copy() smape_values = [] # Perform cross-validation for train_index, test_index in tscv.split(df_train): CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index] # Fit the model on the training data model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"]) # Predict on the test data predictions = model.predict(CV_test.drop(["sales", "date"], axis=1)) df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions[0] # Calculate SMAPE and add it to the list of SMAPE values smape_value = smape(CV_test["sales"].values, predictions) smape_values.append(smape_value) # Print the average SMAPE value across all folds print("Average SMAPE: ", np.mean(smape_values)), smape_values import plotly.express as px # Downsample the data by month and calculate the mean sales for each month df_downsampled = df_fc.resample("M", on="date").mean() # Create a line plot using Plotly Express for sales fig = px.line( df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time" ) # Add line for predictions fig.add_trace( go.Scatter( x=df_downsampled.index, y=df_downsampled["predictions"], name="predictions" ) ) # Display the plot fig.show()
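The notebook above scores its TimeSeriesSplit cross-validation with the custom `smape` function it defines. A minimal, self-contained check of that metric on made-up numbers (the arrays below are illustrative, not from the competition data) shows how the zero/zero masking and the division by the original length interact:

```python
import numpy as np


def smape(preds, target):
    # Same definition as in the notebook: pairs where both the prediction and
    # the target are exactly zero are masked out of the numerator/denominator,
    # but n is still the original number of pairs.
    n = len(preds)
    masked_arr = ~((preds == 0) & (target == 0))
    preds, target = preds[masked_arr], target[masked_arr]
    num = np.abs(preds - target)
    denom = np.abs(preds) + np.abs(target)
    return (200 * np.sum(num / denom)) / n


# Toy check: a forecast that is 10% high everywhere, plus one zero/zero pair.
actual = np.array([100.0, 200.0, 50.0, 0.0])
forecast = np.array([110.0, 220.0, 55.0, 0.0])
print(round(smape(forecast, actual), 3))  # ~7.143; the masked pair still counts in n
```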
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553542.ipynb
null
null
[{"Id": 129553542, "ScriptId": 38522903, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11964784, "CreationDate": "05/14/2023 19:18:03", "VersionNumber": 1.0, "Title": "notebook57100693bb", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder url = "/kaggle/input/demand-forecasting-kernels-only/train.csv" df_train = pd.read_csv(url) df_train["date"] = pd.to_datetime(df_train["date"]) df_train.head() # load test set url2 = "/kaggle/input/demand-forecasting-kernels-only/test.csv" df_test = pd.read_csv(url2) df_test["date"] = pd.to_datetime(df_test["date"]) df_test.head() # Concatenate the training and testing dataframes df_combined = pd.concat([df_train, df_test]).reset_index(drop=True) import plotly.express as px # Downsample the data by month and calculate the mean sales for each month df_downsampled = df_train.resample("M", on="date").mean() # Create a line plot using Plotly Express fig = px.line( df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time" ) # Display the plot fig.show() # Calculate SMAPE def smape(preds, target): n = len(preds) masked_arr = ~((preds == 0) & (target == 0)) preds, target = preds[masked_arr], target[masked_arr] num = np.abs(preds - target) denom = np.abs(preds) + np.abs(target) smape_val = (200 * np.sum(num / denom)) / n return smape_val import pandas as pd import numpy as np from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder # Feature Engineering - Categorical df_combined["day_of_week"] = df_combined["date"].dt.dayofweek df_combined["month"] = df_combined["date"].dt.month df_combined["year"] = df_combined["date"].dt.year # df['week'] = df['date'].dt.week df_combined["day_of_year"] = df_combined["date"].dt.dayofyear df_combined["week_of_year"] = df_combined["date"].dt.weekofyear # df_combined['sin_day_of_week'] = np.sin(2*np.pi*df_combined['day_of_week']/7) # df_combined['cos_day_of_week'] = np.cos(2*np.pi*df_combined['day_of_week']/7) # Encode categorical features le_item = LabelEncoder() le_store = LabelEncoder() df_combined["item"] = le_item.fit_transform(df_combined["item"]) df_combined["store"] = le_store.fit_transform(df_combined["store"]) # Create dummy variables for day_of_week day_of_week_dummies = pd.get_dummies(df_combined["day_of_week"], prefix="day_of_week") df_combined = pd.concat([df_combined, day_of_week_dummies], axis=1) # create a new dataframe to hold the dummy variables # Create dummy variables for month month_dummies = pd.get_dummies(df_combined["month"], prefix="month") df_combined = pd.concat([df_combined, month_dummies], axis=1) # Create dummy variables for year year_dummies = pd.get_dummies(df_combined["year"], prefix="year") df_combined = pd.concat([df_combined, year_dummies], axis=1) # # Drop rows with NaN values # df = df.dropna() df_combined = df_combined.drop( ["month", "year", "day_of_year", "week_of_year", "day_of_week"], 
axis=1 ) # Separate your training and testing dataframes again df_train = df_combined[df_combined["sales"].notna()] df_test = df_combined[df_combined["sales"].isna()] column_list = df_combined.columns.tolist() print(column_list) df_train = df_train.drop("id", axis=1) df_train = df_train.dropna() df_train from sklearn.model_selection import TimeSeriesSplit from lightgbm import LGBMRegressor # Number of splits n_splits = 5 # Initialize TimeSeriesSplit tscv = TimeSeriesSplit(n_splits=n_splits) model = LGBMRegressor() df_fc = df_train.copy() smape_values = [] # Perform cross-validation for train_index, test_index in tscv.split(df_train): CV_train, CV_test = df_train.iloc[train_index], df_train.iloc[test_index] # Fit the model on the training data model.fit(CV_train.drop(["sales", "date"], axis=1), CV_train["sales"]) # Predict on the test data predictions = model.predict(CV_test.drop(["sales", "date"], axis=1)) df_fc.loc[df_train.iloc[test_index].index, "predictions"] = predictions[0] # Calculate SMAPE and add it to the list of SMAPE values smape_value = smape(CV_test["sales"].values, predictions) smape_values.append(smape_value) # Print the average SMAPE value across all folds print("Average SMAPE: ", np.mean(smape_values)), smape_values import plotly.express as px # Downsample the data by month and calculate the mean sales for each month df_downsampled = df_fc.resample("M", on="date").mean() # Create a line plot using Plotly Express for sales fig = px.line( df_downsampled, x=df_downsampled.index, y="sales", title="Sales Over Time" ) # Add line for predictions fig.add_trace( go.Scatter( x=df_downsampled.index, y=df_downsampled["predictions"], name="predictions" ) ) # Display the plot fig.show()
false
0
1,729
0
1,729
1,729
129553728
# Shorten the video into 1 minute from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip # Specify the start and end times for the clip you want to extract (in seconds) start_time = 0 end_time = 60 # First minute # Extract the subclip ffmpeg_extract_subclip( "/kaggle/input/gesturevideo/video_original.mp4", start_time, end_time, targetname="video_cut.mp4", ) # Process video to get openness, total movement, and leaning direction import cv2 import mediapipe as mp import numpy as np from scipy.spatial import ConvexHull import csv # Initialize MediaPipe's Holistic module mp_drawing = mp.solutions.drawing_utils mp_holistic = mp.solutions.holistic # Function to calculate the Euclidean distance between two points def euclidean_distance(p1, p2): return np.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2) # Function to calculate the openness of a pose def pose_openness(holistic_landmarks): keypoints = [ holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER], holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_SHOULDER], holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_HIP], holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_HIP], ] coords = np.array([(kp.x, kp.y) for kp in keypoints]) hull = ConvexHull(coords) return hull.volume # Function to calculate leaning direction def leaning_direction(holistic_landmarks): nose = holistic_landmarks.landmark[mp_holistic.PoseLandmark.NOSE] left_shoulder = holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER] right_shoulder = holistic_landmarks.landmark[ mp_holistic.PoseLandmark.RIGHT_SHOULDER ] avg_shoulder_z = (left_shoulder.z + right_shoulder.z) / 2 if nose.z < avg_shoulder_z: return "Forward" else: return "Backward" # Load the video video_path = "/kaggle/working/video_cut.mp4" cap = cv2.VideoCapture(video_path) # Get the video dimensions and FPS width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(cap.get(cv2.CAP_PROP_FPS)) # Initialize the VideoWriter output_filename = "output_video.mp4" fourcc = cv2.VideoWriter_fourcc( *"mp4v" ) # You can also use "XVID" or "MJPG" for AVI files # out = cv2.VideoWriter(output_filename, fourcc, fps, (width, height)) # Initialize variables prev_landmarks = None total_movement = 0 movement_threshold = ( 0.001 # Adjust the threshold to fine-tune movement detection sensitivity ) keypoints_to_track = [ mp_holistic.PoseLandmark.LEFT_WRIST, mp_holistic.PoseLandmark.RIGHT_WRIST, mp_holistic.PoseLandmark.LEFT_ANKLE, mp_holistic.PoseLandmark.RIGHT_ANKLE, ] # Initialize the output CSV File csv_filename = "output_features.csv" with open(csv_filename, "w", newline="") as file: writer = csv.writer(file) writer.writerow( ["video_name", "frame_number", "total_movement", "avg_pose_openness", "leaning"] ) # Process the video frames count = 0 frame_number = 0 with mp_holistic.Holistic( static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic: while cap.isOpened(): ret, frame = cap.read() if not ret: break # process every fps's frame if count % fps == 0: # Convert the frame to RGB frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Process the frame with MediaPipe's Holistic module results = holistic.process(frame_rgb) # Draw holistic landmarks on the frame if results.pose_landmarks: mp_drawing.draw_landmarks( frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS ) # Calculate the total movement if prev_landmarks: frame_movement = 0 for kp in keypoints_to_track: distance = 
euclidean_distance( results.pose_landmarks.landmark[kp], prev_landmarks.landmark[kp], ) frame_movement += distance if frame_movement > movement_threshold: total_movement += frame_movement prev_landmarks = results.pose_landmarks # Calculate and display the total movement and pose openness on the frame openness_value = pose_openness(results.pose_landmarks) # Calculate and display the leaning direction leaning_dir = leaning_direction(results.pose_landmarks) with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow( [video_path, count, total_movement, openness_value, leaning_dir] ) frame_number += 1 count += 1 # cv2.putText(frame, f"Total Movement: {total_movement:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2) # cv2.putText(frame, f"Pose Openness: {openness_value:.4f}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2) # cv2.putText(frame, f"Leaning: {leaning_dir}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # Save the frame # cv2.imwrite('frame' + str(count) + '.jpg', frame) # out.write(frame) # out.release() # Process the video frames count = 0 frame_number = 0 with mp_holistic.Holistic( static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic: while cap.isOpened(): ret, frame = cap.read() if not ret: break # Process every fps'th frame if count % fps == 0: # Convert the frame to RGB frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Process the frame with MediaPipe's Holistic module results = holistic.process(frame_rgb) # Draw holistic landmarks on the frame if results.pose_landmarks: mp_drawing.draw_landmarks( frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS ) # Calculate the total movement if prev_landmarks: frame_movement = 0 for kp in keypoints_to_track: distance = euclidean_distance( results.pose_landmarks.landmark[kp], prev_landmarks.landmark[kp], ) frame_movement += distance if frame_movement > movement_threshold: total_movement += frame_movement prev_landmarks = results.pose_landmarks # Calculate and display the total movement and pose openness on the frame openness_value = pose_openness(results.pose_landmarks) # Calculate and display the leaning direction leaning_dir = leaning_direction(results.pose_landmarks) with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow( [ video_path, frame_number, total_movement, openness_value, leaning_dir, ] ) # Increase frame_number every time you process a frame frame_number += 1 count += 1 import csv import librosa import numpy as np from moviepy.editor import VideoFileClip import speech_recognition as sr # Load the video video_path = "/kaggle/working/video_cut.mp4" clip = VideoFileClip(video_path) # Initialize the output CSV File csv_filename = "output_audio_features.csv" with open(csv_filename, "w", newline="") as file: writer = csv.writer(file) writer.writerow(["time", "avg_pitch", "avg_intensity", "transcription"]) # Initialize the speech recognizer r = sr.Recognizer() # Process the audio one second at a time for i in range(int(clip.duration)): # Extract one second of audio audio_segment = clip.subclip(i, i + 1).audio audio_segment.write_audiofile("temp_audio.wav") # Load the audio file with librosa y, sampling_rate = librosa.load("temp_audio.wav") # Calculate pitch with librosa pitches, magnitudes = librosa.piptrack(y=y, sr=sampling_rate) # Calculate average pitch and intensity for this second avg_pitch = pitches.mean() avg_intensity = magnitudes.mean() # Transcribe the audio with 
SpeechRecognition with sr.AudioFile("temp_audio.wav") as source: audio = r.record(source) # read the entire audio file try: transcription = r.recognize_google(audio) except sr.UnknownValueError: transcription = "" except sr.RequestError as e: print( f"Could not request results from Google Speech Recognition service; {e}" ) transcription = "" # Write the features to the CSV with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow([i, avg_pitch, avg_intensity, transcription])
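The `pose_openness` helper above reduces four pose landmarks to the area of their convex hull. A small standalone sketch of that step, using four made-up normalized (x, y) keypoints rather than real MediaPipe output, is below; note that for 2-D input SciPy reports the enclosed area in `ConvexHull.volume` and the perimeter in `ConvexHull.area`:

```python
import numpy as np
from scipy.spatial import ConvexHull

# Illustrative normalized landmark coordinates (shoulders and hips), i.e. the
# quadrilateral pose_openness() builds its hull from.
coords = np.array([
    [0.40, 0.30],  # left shoulder
    [0.60, 0.30],  # right shoulder
    [0.42, 0.60],  # left hip
    [0.58, 0.60],  # right hip
])

hull = ConvexHull(coords)
print(f"openness (area): {hull.volume:.4f}")  # for 2-D points, .volume is the enclosed area
print(f"perimeter:       {hull.area:.4f}")
```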
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553728.ipynb
null
null
[{"Id": 129553728, "ScriptId": 38178405, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11926155, "CreationDate": "05/14/2023 19:20:17", "VersionNumber": 1.0, "Title": "practicum_holistic_and_audio_analysis", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 239.0, "LinesInsertedFromPrevious": 239.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# Shorten the video into 1 minute from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip # Specify the start and end times for the clip you want to extract (in seconds) start_time = 0 end_time = 60 # First minute # Extract the subclip ffmpeg_extract_subclip( "/kaggle/input/gesturevideo/video_original.mp4", start_time, end_time, targetname="video_cut.mp4", ) # Process video to get openness, total movement, and leaning direction import cv2 import mediapipe as mp import numpy as np from scipy.spatial import ConvexHull import csv # Initialize MediaPipe's Holistic module mp_drawing = mp.solutions.drawing_utils mp_holistic = mp.solutions.holistic # Function to calculate the Euclidean distance between two points def euclidean_distance(p1, p2): return np.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2) # Function to calculate the openness of a pose def pose_openness(holistic_landmarks): keypoints = [ holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER], holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_SHOULDER], holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_HIP], holistic_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_HIP], ] coords = np.array([(kp.x, kp.y) for kp in keypoints]) hull = ConvexHull(coords) return hull.volume # Function to calculate leaning direction def leaning_direction(holistic_landmarks): nose = holistic_landmarks.landmark[mp_holistic.PoseLandmark.NOSE] left_shoulder = holistic_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER] right_shoulder = holistic_landmarks.landmark[ mp_holistic.PoseLandmark.RIGHT_SHOULDER ] avg_shoulder_z = (left_shoulder.z + right_shoulder.z) / 2 if nose.z < avg_shoulder_z: return "Forward" else: return "Backward" # Load the video video_path = "/kaggle/working/video_cut.mp4" cap = cv2.VideoCapture(video_path) # Get the video dimensions and FPS width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(cap.get(cv2.CAP_PROP_FPS)) # Initialize the VideoWriter output_filename = "output_video.mp4" fourcc = cv2.VideoWriter_fourcc( *"mp4v" ) # You can also use "XVID" or "MJPG" for AVI files # out = cv2.VideoWriter(output_filename, fourcc, fps, (width, height)) # Initialize variables prev_landmarks = None total_movement = 0 movement_threshold = ( 0.001 # Adjust the threshold to fine-tune movement detection sensitivity ) keypoints_to_track = [ mp_holistic.PoseLandmark.LEFT_WRIST, mp_holistic.PoseLandmark.RIGHT_WRIST, mp_holistic.PoseLandmark.LEFT_ANKLE, mp_holistic.PoseLandmark.RIGHT_ANKLE, ] # Initialize the output CSV File csv_filename = "output_features.csv" with open(csv_filename, "w", newline="") as file: writer = csv.writer(file) writer.writerow( ["video_name", "frame_number", "total_movement", "avg_pose_openness", "leaning"] ) # Process the video frames count = 0 frame_number = 0 with mp_holistic.Holistic( static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic: while cap.isOpened(): ret, frame = cap.read() if not ret: break # process every fps's frame if count % fps == 0: # Convert the frame to RGB frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Process the frame with MediaPipe's Holistic module results = holistic.process(frame_rgb) # Draw holistic landmarks on the frame if results.pose_landmarks: mp_drawing.draw_landmarks( frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS ) # Calculate the total movement if prev_landmarks: frame_movement = 0 for kp in keypoints_to_track: distance = 
euclidean_distance( results.pose_landmarks.landmark[kp], prev_landmarks.landmark[kp], ) frame_movement += distance if frame_movement > movement_threshold: total_movement += frame_movement prev_landmarks = results.pose_landmarks # Calculate and display the total movement and pose openness on the frame openness_value = pose_openness(results.pose_landmarks) # Calculate and display the leaning direction leaning_dir = leaning_direction(results.pose_landmarks) with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow( [video_path, count, total_movement, openness_value, leaning_dir] ) frame_number += 1 count += 1 # cv2.putText(frame, f"Total Movement: {total_movement:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2) # cv2.putText(frame, f"Pose Openness: {openness_value:.4f}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2) # cv2.putText(frame, f"Leaning: {leaning_dir}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # Save the frame # cv2.imwrite('frame' + str(count) + '.jpg', frame) # out.write(frame) # out.release() # Process the video frames count = 0 frame_number = 0 with mp_holistic.Holistic( static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic: while cap.isOpened(): ret, frame = cap.read() if not ret: break # Process every fps'th frame if count % fps == 0: # Convert the frame to RGB frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Process the frame with MediaPipe's Holistic module results = holistic.process(frame_rgb) # Draw holistic landmarks on the frame if results.pose_landmarks: mp_drawing.draw_landmarks( frame, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS ) # Calculate the total movement if prev_landmarks: frame_movement = 0 for kp in keypoints_to_track: distance = euclidean_distance( results.pose_landmarks.landmark[kp], prev_landmarks.landmark[kp], ) frame_movement += distance if frame_movement > movement_threshold: total_movement += frame_movement prev_landmarks = results.pose_landmarks # Calculate and display the total movement and pose openness on the frame openness_value = pose_openness(results.pose_landmarks) # Calculate and display the leaning direction leaning_dir = leaning_direction(results.pose_landmarks) with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow( [ video_path, frame_number, total_movement, openness_value, leaning_dir, ] ) # Increase frame_number every time you process a frame frame_number += 1 count += 1 import csv import librosa import numpy as np from moviepy.editor import VideoFileClip import speech_recognition as sr # Load the video video_path = "/kaggle/working/video_cut.mp4" clip = VideoFileClip(video_path) # Initialize the output CSV File csv_filename = "output_audio_features.csv" with open(csv_filename, "w", newline="") as file: writer = csv.writer(file) writer.writerow(["time", "avg_pitch", "avg_intensity", "transcription"]) # Initialize the speech recognizer r = sr.Recognizer() # Process the audio one second at a time for i in range(int(clip.duration)): # Extract one second of audio audio_segment = clip.subclip(i, i + 1).audio audio_segment.write_audiofile("temp_audio.wav") # Load the audio file with librosa y, sampling_rate = librosa.load("temp_audio.wav") # Calculate pitch with librosa pitches, magnitudes = librosa.piptrack(y=y, sr=sampling_rate) # Calculate average pitch and intensity for this second avg_pitch = pitches.mean() avg_intensity = magnitudes.mean() # Transcribe the audio with 
SpeechRecognition with sr.AudioFile("temp_audio.wav") as source: audio = r.record(source) # read the entire audio file try: transcription = r.recognize_google(audio) except sr.UnknownValueError: transcription = "" except sr.RequestError as e: print( f"Could not request results from Google Speech Recognition service; {e}" ) transcription = "" # Write the features to the CSV with open(csv_filename, "a", newline="") as file: writer = csv.writer(file) writer.writerow([i, avg_pitch, avg_intensity, transcription])
false
0
2,566
0
2,566
2,566
129553283
import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.datasets import mnist from tensorflow.keras.layers import Input, Dense, Lambda from tensorflow.keras.models import Model from tensorflow.keras.losses import mse from tensorflow.keras import backend as K from tensorflow.keras.utils import plot_model # **Load the MNIST dataset** (x_train, _), (x_test, _) = mnist.load_data() # **Normalize and flatten the images** x_train = x_train.astype("float32") / 255.0 x_test = x_test.astype("float32") / 255.0 x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) # **Define the VAE architecture** latent_dim = 2 # **Encoder network** inputs = Input(shape=(784,)) enc_h1 = Dense(512, activation="relu")(inputs) enc_h2 = Dense(256, activation="relu")(enc_h1) z_mean = Dense(latent_dim)(enc_h2) z_log_var = Dense(latent_dim)(enc_h2) # **Latent space sampling function** def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal( shape=(K.shape(z_mean)[0], latent_dim), mean=0.0, stddev=1.0 ) return z_mean + K.exp(0.5 * z_log_var) * epsilon # **Reparameterization trick** z = Lambda(sampling)([z_mean, z_log_var]) # **Decoder network** dec_h1 = Dense(256, activation="relu")(z) dec_h2 = Dense(512, activation="relu")(dec_h1) outputs = Dense(784, activation="sigmoid")(dec_h2) # **Define the VAE model** vae = Model(inputs, outputs) # **VAE loss function** reconstruction_loss = mse(inputs, outputs) kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) vae_loss = K.mean(reconstruction_loss + kl_loss) vae.add_loss(vae_loss) # **Compile the VAE model** vae.compile(optimizer="adam") # **Train the VAE model** history = vae.fit(x_train, epochs=50, batch_size=128, validation_data=(x_test, None)) # **Generate images using the VAE model** n = 10 # Number of images to generate encoded_imgs = vae.predict(x_test) decoded_imgs = vae.predict(encoded_imgs) # **Visualize the generated images** plt.figure(figsize=(20, 4)) for i in range(n): # Display original images ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28), cmap="gray") plt.title("Original") plt.axis("off") # Display reconstructed images ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28), cmap="gray") plt.title("Generated") plt.axis("off") plt.show()
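The `sampling()` Lambda above implements the reparameterization trick: instead of sampling z directly from N(z_mean, exp(z_log_var)), it samples eps from a standard normal and shifts/scales it, so gradients can flow through z_mean and z_log_var. A tiny NumPy sketch of the same computation, with made-up encoder outputs for a single input, is:

```python
import numpy as np

rng = np.random.default_rng(0)
latent_dim = 2

# Pretend the encoder produced these statistics for one input image.
z_mean = np.array([0.3, -1.2])
z_log_var = np.array([-2.0, 0.0])  # variances exp(-2) ~= 0.135 and exp(0) = 1

# Reparameterization trick: the randomness lives only in eps, while z stays a
# deterministic (differentiable) function of z_mean and z_log_var.
eps = rng.standard_normal(latent_dim)
z = z_mean + np.exp(0.5 * z_log_var) * eps
print(z)
```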
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553283.ipynb
null
null
[{"Id": 129553283, "ScriptId": 38522492, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12671037, "CreationDate": "05/14/2023 19:14:53", "VersionNumber": 1.0, "Title": "project basing on the MNIST dataset", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 90.0, "LinesInsertedFromPrevious": 90.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.datasets import mnist from tensorflow.keras.layers import Input, Dense, Lambda from tensorflow.keras.models import Model from tensorflow.keras.losses import mse from tensorflow.keras import backend as K from tensorflow.keras.utils import plot_model # **Load the MNIST dataset** (x_train, _), (x_test, _) = mnist.load_data() # **Normalize and flatten the images** x_train = x_train.astype("float32") / 255.0 x_test = x_test.astype("float32") / 255.0 x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) # **Define the VAE architecture** latent_dim = 2 # **Encoder network** inputs = Input(shape=(784,)) enc_h1 = Dense(512, activation="relu")(inputs) enc_h2 = Dense(256, activation="relu")(enc_h1) z_mean = Dense(latent_dim)(enc_h2) z_log_var = Dense(latent_dim)(enc_h2) # **Latent space sampling function** def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal( shape=(K.shape(z_mean)[0], latent_dim), mean=0.0, stddev=1.0 ) return z_mean + K.exp(0.5 * z_log_var) * epsilon # **Reparameterization trick** z = Lambda(sampling)([z_mean, z_log_var]) # **Decoder network** dec_h1 = Dense(256, activation="relu")(z) dec_h2 = Dense(512, activation="relu")(dec_h1) outputs = Dense(784, activation="sigmoid")(dec_h2) # **Define the VAE model** vae = Model(inputs, outputs) # **VAE loss function** reconstruction_loss = mse(inputs, outputs) kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) vae_loss = K.mean(reconstruction_loss + kl_loss) vae.add_loss(vae_loss) # **Compile the VAE model** vae.compile(optimizer="adam") # **Train the VAE model** history = vae.fit(x_train, epochs=50, batch_size=128, validation_data=(x_test, None)) # **Generate images using the VAE model** n = 10 # Number of images to generate encoded_imgs = vae.predict(x_test) decoded_imgs = vae.predict(encoded_imgs) # **Visualize the generated images** plt.figure(figsize=(20, 4)) for i in range(n): # Display original images ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28), cmap="gray") plt.title("Original") plt.axis("off") # Display reconstructed images ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28), cmap="gray") plt.title("Generated") plt.axis("off") plt.show()
false
0
901
0
901
901
129553333
# Exploratory Data Analysis On Netflix Dataset in Python # ***1. Importing the required libraries and dataset for EDA*** import pandas as pd import numpy as np import matplotlib.pyplot as plt import plotly.express as px plt.style.use("fivethirtyeight") plt.rcParams["figure.figsize"] = (10, 6) data = pd.read_csv("/kaggle/input/datanetflix/netflix.csv") data.sample(10) # *****2.display the columns of my data***** data.columns # ****3.show column types**** data.dtypes # ***4.show size my data*** data.shape # ***6. Dropping the duplicate rows*** duplicate_rows_data = data[data.duplicated()] print("number of duplicate rows: ", duplicate_rows_data.shape) data.count() data = data.drop_duplicates() data.head(5) # ***5. Renaming the columns*** data = data.rename(columns={"listed_in": "categorie "}) data.head(5) # ***7. Dropping the missing or null values*** data.count() print(data.isnull().sum()) data = data.dropna() print(data.isnull().sum()) # **maintenant on a nettoyé la base de données avant l'analyse pour s'assurer que les résultats obtenus sont fiables, pertinents et cohérents avec la réalité.** # ***QST:1 Top 5 des meilleures catégories.*** # new_data = {key: data[key] for key in ["show_id", "categorie "]} # Afficher les premières entrées de la nouvelle variable for key in new_data.keys(): df = pd.DataFrame(new_data) # Afficher les premières entrées du DataFrame DFF = df[0:5] DFF # ***ensuite on on va compter le nombre des films et des series pour chaque categories pour préciser mieux les categories et les afficher dans un histogramme selon le nombre des film dans une categorie*** # Compter le nombre de films pour chaque catégorie #'strip()' recherche les caractères spécifiés et les supprime des bords de la chaîne. categories = new_data["categorie "].str.split(", ") category_counts = {} for movie_categories in categories: for category in movie_categories: if category.strip() in category_counts: category_counts[category.strip()] += 1 else: category_counts[category.strip()] = 1 # Tracer l'histogramme plt.bar(category_counts.keys(), category_counts.values()) plt.xticks(rotation=90) plt.xlabel("Catégorie de films") plt.ylabel("Nombre de films") plt.show() # ***QST:2Top 5 des réalisateurs.*** # filtered_directors = pd.DataFrame() filtered_directors = data["director"].str.split(",", expand=True).stack() filtered_directors = filtered_directors.to_frame() filtered_directors.columns = ["Director"] directors = ( filtered_directors.groupby(["Director"]).size().reset_index(name="Total Content") ) directors = directors[directors.Director != "No Director Specified"] directors = directors.sort_values(by=["Total Content"], ascending=False) directorsTop5 = directors.head(5) directorsTop5 = directorsTop5.sort_values(by=["Total Content"]) fig1 = px.bar( directorsTop5, x="Director", y="Total Content", title="Top 5 Directors on Netflix" ) fig1.show() # Créer un dictionnaire pour stocker le nombre d'apparitions de chaque acteur actors_count = {} # Parcourir chaque entrée de la variable cast for cast_list in data["cast"]: # Diviser la chaîne de caractères contenant les noms des acteurs en une liste d'acteurs individuels actors = cast_list.split(", ") # Parcourir chaque acteur de la liste for actor in actors: # Ajouter 1 à l'entrée correspondant à l'acteur dans le dictionnaire actors_count, ou créer une nouvelle entrée avec une valeur de 1 si l'acteur n'a pas encore été rencontré actors_count[actor] = actors_count.get(actor, 0) + 1 # Trier les entrées du dictionnaire par ordre décroissant de nombre d'apparitions et 
en extraire les 10 premières top_actors = sorted(actors_count.items(), key=lambda x: x[1], reverse=True)[:10] # Créer une liste pour stocker les noms des acteurs et une autre pour stocker le nombre d'apparitions correspondant actor_names = [] actor_counts = [] for actor, count in top_actors: actor_names.append(actor) actor_counts.append(count) plt.barh(actor_names, actor_counts) # Ajouter un titre au graphique plt.title("Top 10 des acteurs les plus fréquents") # Ajouter une étiquette à l'axe y plt.xlabel("Nombre d'apparitions") # Afficher le graphique plt.show() # ***QST:3 Les cinq meilleures séries télévisées avec le plus grand nombre de saisons*** # ***Grouper les données par nom de série et trouver le maximum de saison pour chaque série*** # Grouper les données par nom de série et trouver le maximum de saison pour chaque série tv_shows = data.loc[data["type"] == "TV Show"] tv_shows # ***-ensuite afficher les cinqs meilleure series qui ont la plus grande duration*** tv_shows = data[data.type == "TV Show"] tv_shows_sorted = tv_shows.sort_values(by="duration", ascending=False) tv_shows_sorted.head(5) # ***QST:5 Netflix se concentre-t-il davantage sur les séries télévisées que sur les # films ces dernières années ?*** # **Dans cette question on compte le nombre des films et des series dans la base de donnée aprés en voir le resultas des chaque variable** nbr_tv_shows = 0 nbr_movies = 0 i = 0 while True: if data.iloc[i]["type"] == "TV Show": nbr_tv_shows += 1 if data.iloc[i]["type"] == "Movie": nbr_movies += 1 i += 1 if i == len(data): break print("le nombre des series", nbr_tv_shows) print("le nombre des films", nbr_movies) # ***en fin , pour mieux visulaiser le traitement on affiche les resultas dans une presentation graphique pas secteur*** sizes = [nbr_tv_shows, nbr_movies] labels = ["Tv_shows", "Movies"] # Création du graphique circulaire fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=90) ax1.axis("equal") # Pour que le cercle soit parfaitement circulaire # Affichage du diagramme plt.show()
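The category and actor counts above are accumulated with explicit Python loops over split strings. A compact pandas alternative, sketched on a tiny made-up frame (the column names mirror the notebook's `categorie ` and `cast` columns, the rows are invented), does the same with `str.split`, `explode`, and `value_counts`:

```python
import pandas as pd

# Tiny illustrative frame in the same shape as the cleaned Netflix data.
toy = pd.DataFrame({
    "categorie ": ["Dramas, International Movies", "Comedies", "Dramas, Comedies"],
    "cast": ["Actor A, Actor B", "Actor B", "Actor A, Actor C"],
})

# Split each cell into a list, explode to one row per item, strip whitespace,
# then count occurrences -- equivalent to the manual dictionary loops above.
category_counts = toy["categorie "].str.split(",").explode().str.strip().value_counts()
actor_counts = toy["cast"].str.split(",").explode().str.strip().value_counts()

print(category_counts)
print(actor_counts.head(10))  # top actors, as in the bar chart above
```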
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/553/129553333.ipynb
null
null
[{"Id": 129553333, "ScriptId": 38440577, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14707911, "CreationDate": "05/14/2023 19:15:29", "VersionNumber": 2.0, "Title": "DBnetflix", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 176.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 35.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
# Exploratory Data Analysis On Netflix Dataset in Python # ***1. Importing the required libraries and dataset for EDA*** import pandas as pd import numpy as np import matplotlib.pyplot as plt import plotly.express as px plt.style.use("fivethirtyeight") plt.rcParams["figure.figsize"] = (10, 6) data = pd.read_csv("/kaggle/input/datanetflix/netflix.csv") data.sample(10) # *****2.display the columns of my data***** data.columns # ****3.show column types**** data.dtypes # ***4.show size my data*** data.shape # ***6. Dropping the duplicate rows*** duplicate_rows_data = data[data.duplicated()] print("number of duplicate rows: ", duplicate_rows_data.shape) data.count() data = data.drop_duplicates() data.head(5) # ***5. Renaming the columns*** data = data.rename(columns={"listed_in": "categorie "}) data.head(5) # ***7. Dropping the missing or null values*** data.count() print(data.isnull().sum()) data = data.dropna() print(data.isnull().sum()) # **maintenant on a nettoyé la base de données avant l'analyse pour s'assurer que les résultats obtenus sont fiables, pertinents et cohérents avec la réalité.** # ***QST:1 Top 5 des meilleures catégories.*** # new_data = {key: data[key] for key in ["show_id", "categorie "]} # Afficher les premières entrées de la nouvelle variable for key in new_data.keys(): df = pd.DataFrame(new_data) # Afficher les premières entrées du DataFrame DFF = df[0:5] DFF # ***ensuite on on va compter le nombre des films et des series pour chaque categories pour préciser mieux les categories et les afficher dans un histogramme selon le nombre des film dans une categorie*** # Compter le nombre de films pour chaque catégorie #'strip()' recherche les caractères spécifiés et les supprime des bords de la chaîne. categories = new_data["categorie "].str.split(", ") category_counts = {} for movie_categories in categories: for category in movie_categories: if category.strip() in category_counts: category_counts[category.strip()] += 1 else: category_counts[category.strip()] = 1 # Tracer l'histogramme plt.bar(category_counts.keys(), category_counts.values()) plt.xticks(rotation=90) plt.xlabel("Catégorie de films") plt.ylabel("Nombre de films") plt.show() # ***QST:2Top 5 des réalisateurs.*** # filtered_directors = pd.DataFrame() filtered_directors = data["director"].str.split(",", expand=True).stack() filtered_directors = filtered_directors.to_frame() filtered_directors.columns = ["Director"] directors = ( filtered_directors.groupby(["Director"]).size().reset_index(name="Total Content") ) directors = directors[directors.Director != "No Director Specified"] directors = directors.sort_values(by=["Total Content"], ascending=False) directorsTop5 = directors.head(5) directorsTop5 = directorsTop5.sort_values(by=["Total Content"]) fig1 = px.bar( directorsTop5, x="Director", y="Total Content", title="Top 5 Directors on Netflix" ) fig1.show() # Créer un dictionnaire pour stocker le nombre d'apparitions de chaque acteur actors_count = {} # Parcourir chaque entrée de la variable cast for cast_list in data["cast"]: # Diviser la chaîne de caractères contenant les noms des acteurs en une liste d'acteurs individuels actors = cast_list.split(", ") # Parcourir chaque acteur de la liste for actor in actors: # Ajouter 1 à l'entrée correspondant à l'acteur dans le dictionnaire actors_count, ou créer une nouvelle entrée avec une valeur de 1 si l'acteur n'a pas encore été rencontré actors_count[actor] = actors_count.get(actor, 0) + 1 # Trier les entrées du dictionnaire par ordre décroissant de nombre d'apparitions et 
en extraire les 10 premières top_actors = sorted(actors_count.items(), key=lambda x: x[1], reverse=True)[:10] # Créer une liste pour stocker les noms des acteurs et une autre pour stocker le nombre d'apparitions correspondant actor_names = [] actor_counts = [] for actor, count in top_actors: actor_names.append(actor) actor_counts.append(count) plt.barh(actor_names, actor_counts) # Ajouter un titre au graphique plt.title("Top 10 des acteurs les plus fréquents") # Ajouter une étiquette à l'axe y plt.xlabel("Nombre d'apparitions") # Afficher le graphique plt.show() # ***QST:3 Les cinq meilleures séries télévisées avec le plus grand nombre de saisons*** # ***Grouper les données par nom de série et trouver le maximum de saison pour chaque série*** # Grouper les données par nom de série et trouver le maximum de saison pour chaque série tv_shows = data.loc[data["type"] == "TV Show"] tv_shows # ***-ensuite afficher les cinqs meilleure series qui ont la plus grande duration*** tv_shows = data[data.type == "TV Show"] tv_shows_sorted = tv_shows.sort_values(by="duration", ascending=False) tv_shows_sorted.head(5) # ***QST:5 Netflix se concentre-t-il davantage sur les séries télévisées que sur les # films ces dernières années ?*** # **Dans cette question on compte le nombre des films et des series dans la base de donnée aprés en voir le resultas des chaque variable** nbr_tv_shows = 0 nbr_movies = 0 i = 0 while True: if data.iloc[i]["type"] == "TV Show": nbr_tv_shows += 1 if data.iloc[i]["type"] == "Movie": nbr_movies += 1 i += 1 if i == len(data): break print("le nombre des series", nbr_tv_shows) print("le nombre des films", nbr_movies) # ***en fin , pour mieux visulaiser le traitement on affiche les resultas dans une presentation graphique pas secteur*** sizes = [nbr_tv_shows, nbr_movies] labels = ["Tv_shows", "Movies"] # Création du graphique circulaire fig1, ax1 = plt.subplots() ax1.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=90) ax1.axis("equal") # Pour que le cercle soit parfaitement circulaire # Affichage du diagramme plt.show()
false
0
1,848
2
1,848
1,848
129909725
# # Imports import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from PIL import Image import cv2 # Data Directories ROOT_DIR = "/kaggle/input/airbus-ship-detection" train_image_dir = os.path.join(ROOT_DIR, "train_v2") test_image_dir = os.path.join(ROOT_DIR, "test_v2") sample_submission_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv") train_ship_segmentations_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv") # Data Loading train = os.listdir(train_image_dir) test = os.listdir(test_image_dir) sample_submission_df = pd.read_csv(sample_submission_dir) train_ship_segmentations_df = pd.read_csv(train_ship_segmentations_dir) # # Data View first_train_image_path = os.path.join(train_image_dir, train[0]) first_train_image = cv2.imread(first_train_image_path) first_train_image = cv2.cvtColor(first_train_image, cv2.COLOR_BGR2RGB) print(f"{first_train_image.shape = }\n") plt.title("First Train Image") plt.imshow(first_train_image) train_ship_segmentations_df.head(10) num_of_total_images = train_ship_segmentations_df.ImageId.nunique() not_empty = pd.notna(train_ship_segmentations_df.EncodedPixels) num_of_empty_images = (~not_empty).sum() num_of_non_empty_images = not_empty.sum() nun_of_total_masks = train_ship_segmentations_df[not_empty].ImageId.nunique() print( f"{num_of_total_images = } | {num_of_empty_images = } | {num_of_non_empty_images = } | {nun_of_total_masks = }" )
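The counts printed above come from `pd.notna` on the `EncodedPixels` column, where each image without ships contributes one NaN row and each ship contributes one RLE row. A small sketch on a made-up slice of that table (the column names match the competition CSV, the rows are invented) reproduces the same counts and adds a per-image ship count via `groupby`:

```python
import numpy as np
import pandas as pd

# Illustrative rows in the format of train_ship_segmentations_v2.csv.
toy = pd.DataFrame({
    "ImageId": ["a.jpg", "b.jpg", "b.jpg", "c.jpg"],
    "EncodedPixels": [np.nan, "1 5 20 3", "100 4", "7 2"],
})

not_empty = toy["EncodedPixels"].notna()
print("rows without a mask:", (~not_empty).sum(), "| mask rows:", not_empty.sum())

# Ships per image: count the non-null masks within each ImageId.
print(toy.groupby("ImageId")["EncodedPixels"].count())
```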
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/909/129909725.ipynb
null
null
[{"Id": 129909725, "ScriptId": 38642628, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12676283, "CreationDate": "05/17/2023 11:17:09", "VersionNumber": 2.0, "Title": "Ido_Ronen_Ship_Detection", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 49.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 49.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Imports import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from PIL import Image import cv2 # Data Directories ROOT_DIR = "/kaggle/input/airbus-ship-detection" train_image_dir = os.path.join(ROOT_DIR, "train_v2") test_image_dir = os.path.join(ROOT_DIR, "test_v2") sample_submission_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv") train_ship_segmentations_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv") # Data Loading train = os.listdir(train_image_dir) test = os.listdir(test_image_dir) sample_submission_df = pd.read_csv(sample_submission_dir) train_ship_segmentations_df = pd.read_csv(train_ship_segmentations_dir) # # Data View first_train_image_path = os.path.join(train_image_dir, train[0]) first_train_image = cv2.imread(first_train_image_path) first_train_image = cv2.cvtColor(first_train_image, cv2.COLOR_BGR2RGB) print(f"{first_train_image.shape = }\n") plt.title("First Train Image") plt.imshow(first_train_image) train_ship_segmentations_df.head(10) num_of_total_images = train_ship_segmentations_df.ImageId.nunique() not_empty = pd.notna(train_ship_segmentations_df.EncodedPixels) num_of_empty_images = (~not_empty).sum() num_of_non_empty_images = not_empty.sum() nun_of_total_masks = train_ship_segmentations_df[not_empty].ImageId.nunique() print( f"{num_of_total_images = } | {num_of_empty_images = } | {num_of_non_empty_images = } | {nun_of_total_masks = }" )
false
0
532
0
532
532
129429904
import math import bisect import itertools import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import skew, kurtosis # Regressors and classifiers from xgboost import XGBRegressor from sklearn.ensemble import ( RandomForestClassifier, HistGradientBoostingClassifier, HistGradientBoostingRegressor, ) from sklearn.linear_model import Ridge from catboost import CatBoostRegressor from sklearn.metrics import ( mean_squared_error, mean_absolute_error, accuracy_score, make_scorer, ) from sklearn.model_selection import train_test_split, cross_val_score from sklearn.model_selection import RandomizedSearchCV, GridSearchCV from sklearn.decomposition import PCA from sklearn.feature_selection import SelectKBest from sklearn.preprocessing import StandardScaler from sklearn.impute import KNNImputer import category_encoders as ce ONE = 1.123456789 # # Loading Dataframes # Dataset taken from Kaggle competition: Prediction of Wild Blueberry Yield. dir_input = "../input/playground-series-s3e14" dir_output = "../working" out_prefix = dir_output + "/playground-series-s3e14" dir_train_csv = dir_input + "/train.csv" dir_test_csv = dir_input + "/test.csv" pd.read_csv(dir_train_csv) df_train = pd.read_csv(dir_train_csv) target = "yield" df_target = df_train[[target]] df_train = df_train.drop(columns=[target]) df_test = pd.read_csv(dir_test_csv) df = pd.concat([df_train, df_test]) df.index = df["id"].values df = df.drop(columns=["id"]) idx_train = df.index[: len(df_train)] idx_pred = df.index[len(df_train) :] df_target.index = idx_train # df : contains both training and testing data without the target column. # to be used for applying the same preprocessing to both training and testing data. # df_target : target column of the training data. # idx_train : indices of all the training samples. # idx_pred : indices of the samples from the testing data. print("No. of training samples:", len(idx_train)) print("No. of testing samples:", len(idx_pred)) df # # Imputing Missing Values df.isnull().sum() # Nothing to do here. # # Detecting Outliers df_out = df.copy() df_out[target] = df_target[target] # adding the target column here to check correations df_out.dtypes # Distributions of all the features m, n = 6, 3 fig, ax = plt.subplots(m, n, figsize=[12, 15]) fig.tight_layout() for i in range(m): for j in range(n): col = i * n + j if col >= len(df_out.columns): break col = df_out.columns[col] AX = ax[i][j] sns.histplot(df_out[col], ax=AX) AX.set(yticks=[], ylabel="") plt.show() # Even though all features have dtype float64, all but fruitset, fruitmass, seeds, and yield have the flavor of categorical features. I am guessing that because of this tree based regression will work better than linear regression. # Observing the distributions of yield by all the predictors. m, n = 4, 4 fig, ax = plt.subplots(m, n, figsize=[12, 10]) fig.tight_layout() fig.subplots_adjust(hspace=0.5) for i in range(m): for j in range(n): col = i * n + j if col >= len(df_out.columns): break col = df_out.columns[col] if col == target: continue Ax = ax[i][j] sns.scatterplot( x=df_out.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax ) # putting ticks and labels nicely along the xaxis, in retrospect. 
this will be helpful in # identifying outlier values xmin = df_out.loc[idx_train][col].min() xmax = df_out.loc[idx_train][col].max() n_xticks = 10 d = (xmax - xmin) / n_xticks Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)]) Ax.set_xticklabels(Ax.get_xticks(), rotation=45) Ax.grid(True, linestyle="-.") # also in retrospect, putting yticks and ylables to help out outlier detection Ax.set(ylabel="") if i < m - 1 or j == 0: Ax.set(yticks=[]) plt.show() # There appears to be positive correlation between `yield` and the last three features. However there are some samples falling onto a horizontal floor there, which seems anomalous. In the other pictures most samples are neatly on vertical lines, some are scattered. Since all but the last three features seem rather categorical in nature we shall project some of the samples onto the nearest vertical lines. We shall identify the remaining outliers and see how many there are. df_o2 = df_out.copy() df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x > 12 else 12.5) df_o2["clonesize"] = df_o2["clonesize"].map(lambda x: x if x < 40 else 37.5) df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x > 0.1 else 0.117) df_o2["bumbles"] = df_o2["bumbles"].map(lambda x: x if x < 0.255 or x > 0.35 else 0.25) df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x > 0.2 else 0.058) df_o2["osmia"] = df_o2["osmia"].map(lambda x: x if x < 0.53 or x > 0.63 else 0.63) df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map( lambda x: x if x < 77.5 or x > 80 else 77.4 ) df_o2["MaxOfUpperTRange"] = df_o2["MaxOfUpperTRange"].map( lambda x: x if x < 87 or x > 90 else 86 ) df_o2["MinOfUpperTRange"] = df_o2["MinOfUpperTRange"].map( lambda x: x if x > 40 else 42.1 ) df_o2["AverageOfUpperTRange"] = df_o2["AverageOfUpperTRange"].map( lambda x: x if x < 65 or x > 70 else 64.7 ) df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map( lambda x: x if x < 51 or x > 53 else 50.2 ) df_o2["MaxOfLowerTRange"] = df_o2["MaxOfLowerTRange"].map( lambda x: x if x < 65 or x > 67 else 68.2 ) df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map( lambda x: x if x < 24.5 or x > 26 else 24.3 ) df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map( lambda x: x if x < 27.5 or x > 29 else 27 ) df_o2["MinOfLowerTRange"] = df_o2["MinOfLowerTRange"].map( lambda x: x if x < 30.5 or x > 32 else 30 ) df_o2["AverageOfLowerTRange"] = df_o2["AverageOfLowerTRange"].map( lambda x: x if x < 44 or x > 45.7 else 45.8 ) df_o2["RainingDays"] = df_o2["RainingDays"].map(lambda x: x if x < 25 or x > 30 else 24) df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map( lambda x: x if x < 0.065 or x > 0.075 else 0.06 ) df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map( lambda x: x if x < 0.12 or x > 0.15 else 0.1 ) df_o2["AverageRainingDays"] = df_o2["AverageRainingDays"].map( lambda x: x if x < 0.24 or x > 0.255 else 0.26 ) df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.2 or x > 0.245 else 0.25) df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.48 or x > 0.56 else 0.5) df_o2["andrena"] = df_o2["andrena"].map(lambda x: x if x < 0.7 or x > 0.71 else 0.75) m, n = 4, 4 fig, ax = plt.subplots(m, n, figsize=[12, 10]) fig.tight_layout() fig.subplots_adjust(hspace=0.5) for i in range(m): for j in range(n): col = i * n + j if col >= len(df_o2.columns): break col = df_o2.columns[col] if col == target: continue Ax = ax[i][j] sns.scatterplot( x=df_o2.loc[idx_train][col], y=df_target.loc[idx_train][target], ax=Ax ) # putting ticks and labels 
nicely along the xaxis, in retrospect. this will be helpful in # identifying outlier values xmin = df_o2.loc[idx_train][col].min() xmax = df_o2.loc[idx_train][col].max() n_xticks = 10 d = (xmax - xmin) / n_xticks Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)]) Ax.set_xticklabels(Ax.get_xticks(), rotation=45) Ax.grid(True, linestyle="-.") # also in retrospect, putting yticks and ylables to help out outlier detection Ax.set(ylabel="") if i < m - 1 or j == 0: Ax.set(yticks=[]) plt.show() conds = [] conds.append(df_o2.loc[idx_train][target] < 2550) conds.append(df_o2.loc[idx_train]["seeds"] < 24) conds.append(df_o2.loc[idx_train]["honeybee"] > 5) conds.append(df_o2.loc[idx_train]["fruitmass"] < 0.34) conds.append( df_o2.loc[idx_train]["bumbles"].apply(lambda i: True if i > 0.4 else False).values ) conds.append( df_o2.loc[idx_train]["andrena"].apply(lambda i: True if i < 0.2 else False).values ) conds.append( df_o2.apply(lambda r: r["seeds"] > 37 and r["yield"] < 3000, axis=1).values ) idx_custom = [any(X) for X in zip(*conds)] idx_all_in = [not x for x in idx_custom] # boolean flag for inlier indices in idx_train idx_all_in = df_o2.loc[idx_train].loc[idx_all_in].index # inlier indices in idx_train. m, n = 4, 4 fig, ax = plt.subplots(m, n, figsize=[12, 10]) fig.tight_layout() fig.subplots_adjust(hspace=0.5) for i in range(m): for j in range(n): col = i * n + j if col >= len(df_o2.columns): break col = df_o2.columns[col] if col == target: continue Ax = ax[i][j] sns.scatterplot( x=df_o2.loc[idx_all_in][col], y=df_target.loc[idx_all_in][target], ax=Ax ) sns.scatterplot( x=df_o2.loc[idx_train].loc[idx_custom][col], y=df_target.loc[idx_custom][target], ax=Ax, s=20, ) # putting ticks and labels nicely along the xaxis, in retrospect. this will be helpful in # identifying outlier values xmin = df_o2.loc[idx_train][col].min() xmax = df_o2.loc[idx_train][col].max() n_xticks = 10 d = (xmax - xmin) / n_xticks Ax.set(xticks=[round(xmin + i * d, 2) for i in range(n_xticks + 1)]) Ax.set_xticklabels(Ax.get_xticks(), rotation=45) Ax.grid(True, linestyle="-.") # also in retrospect, putting yticks and ylables to help out outlier detection if i < m - 1 or j == 0: Ax.set(yticks=[]) Ax.set(ylabel="") plt.show() print("% of outliers in training data:", 100 * (1 - len(idx_all_in) / len(idx_train))) # The outliers have been identified with reasonable accuracy and only about 1% of the total training data are outliers, so we are going to drop them from training. All inlier indices are in idx_all_in # Some of the samples I have identified as outliers fall squarely on a potential linear regression line in the plots against the highly correlated predictors fruitset, fruitmass, and seeds. These are samples that fell out of the vertical lines in the other plots. Whether it would be better to project these onto the vertical lines as well or not is unclear since they are a bit too scattered. # # Preprocessing and Feature Engineering # ## Dealing with Skewness # Checking for skewness df_full = df_o2.copy() def zero(s): if 0 in s.index: return s.loc[0] else: return 0 num_columns = [c for c in df_full.columns if df_full[c].dtype in ["int64", "float64"]] zero_percents = [ 100 * zero(df_full[c].value_counts()) / len(df_full) for c in num_columns ] skewness = df_full.skew(numeric_only=True) skew_zero = pd.DataFrame(skewness, columns=["skewness"]) skew_zero["% zero"] = zero_percents skew_zero.sort_values(by="skewness", ascending=False) # Most features are not very skewed. `honeybee` is the only odd one. 
# A quick look at the honeybee distribution df_full["honeybee"].describe() # Which samples have the high values in honeybee? df_full.loc[df_full["honeybee"] > 1] # Not sure why these values are so much higher than the rest. Almost equal but small number of such anomalous values appear both in training and testing data (as evidenced by valid and NaN values in the `yield` column). # Log transforming the highly skewed columns df_unskewed = df_full.copy().drop( columns=[target] ) # the target columns will not be transformed. It has low skewness. for c in df_full.columns: if df_full[c].dtype == "object": continue if df_full[c].skew() < 0.5: continue # threshhold for being considered to be highly skewed. shift = -df_full[c].min() + ONE df_unskewed[c] = df_unskewed[c].apply(lambda x: math.log(shift + x)) df_unskewed.describe() # ## Engigeering Features # How are all the features correlated? # .corr() will get correlations among all features, there is no object type feature. df_aug = df_unskewed.copy() df_aug[target] = df_target[target] # adding the target column here to check correations sns.heatmap(df_aug.corr()) df_aug.drop(columns=[target], inplace=True) # getting rid of the target column # The temperature features are perfectly correlated, as are the two rain related features. No need to keep all of them. Instead, we can create some interaction features. df_aug = df_unskewed.copy() df_aug["TRange"] = df_aug["MaxOfUpperTRange"] - df_aug["MinOfLowerTRange"] # df_aug['AverageT'] = (df_aug['AverageOfUpperTRange'] + df_aug['AverageOfLowerTRange'])/2 df_aug["T_bee_int"] = ( df_aug["AverageOfUpperTRange"] + df_aug["AverageOfLowerTRange"] ) * (df_aug["honeybee"] + df_aug["bumbles"] + df_aug["andrena"] + df_aug["osmia"]) df_aug["honeybee_clonesize_int"] = df_aug["honeybee"] * df_aug["clonesize"] df_aug["fruitset_seeds_int"] = df_aug["fruitset"] * df_aug["seeds"] df_aug["fruitmass_seeds_int"] = df_aug["fruitmass"] * df_aug["seeds"] df_aug["mass_set_int"] = df_aug["fruitmass"] * df_aug["fruitset"] df_aug["andrena_osmia_int"] = df_aug["andrena"] * df_aug["osmia"] df_aug["set_bee_int"] = ( df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"] ) * df_aug["fruitset"] df_aug["mass_bee_int"] = ( df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"] ) * df_aug["fruitmass"] df_aug["seed_bee_int"] = ( df_aug["bumbles"] + df_aug["osmia"] + df_aug["andrena"] ) * df_aug["seeds"] df_aug.drop( columns=[ "MaxOfUpperTRange", "MinOfUpperTRange", "MaxOfLowerTRange", "MinOfLowerTRange", "AverageOfUpperTRange", "AverageOfLowerTRange", "AverageRainingDays", ], inplace=True, ) sns.heatmap(df_aug.corr()) plt.show() # There is no concrete benchmark showing the efficacy of these feature engineerings. 
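The unskewing cell above shifts each highly skewed column to be strictly positive and then takes its log. A self-contained sketch of that step on one synthetic right-skewed column (random log-normal values standing in for a feature; the 0.5 threshold and the `ONE` offset follow the notebook) is:

```python
import math

import numpy as np
import pandas as pd

ONE = 1.123456789  # same small offset the notebook adds before taking the log

rng = np.random.default_rng(42)
col = pd.Series(rng.lognormal(mean=0.0, sigma=1.0, size=1_000), name="skewed_feature")

print("skewness before:", round(col.skew(), 3))

if col.skew() >= 0.5:               # the notebook's "highly skewed" threshold
    shift = -col.min() + ONE        # make every shifted value strictly positive
    col = col.apply(lambda x: math.log(shift + x))

print("skewness after: ", round(col.skew(), 3))
```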
# ## Standardization
# ### Input Data

scaler = StandardScaler()
# the scaler is fitted only to the training data, and even then only to the inliers
scaler.fit(df_aug.loc[idx_all_in])
df_scaled = df_aug.copy()
# we transform both the training and testing data using the same scaler
df_scaled = pd.DataFrame(
    scaler.transform(df_scaled), index=df_scaled.index, columns=df_scaled.columns
)
df_scaled

# ### Target Data

mod_target = df_target.copy().loc[idx_all_in]
scaler_target = StandardScaler()
mod_target = scaler_target.fit_transform(mod_target)

# # Modeling

# train/validation split
xtrain, xval, ytrain, yval = train_test_split(
    df_scaled.loc[idx_all_in], mod_target, test_size=0.3
)
yval = scaler_target.inverse_transform(
    yval
)  # yval is transformed back to the original scale right away
print("Shape of training data:", xtrain.shape, ytrain.shape)
print("Shape of validation data:", xval.shape, yval.shape)

unique_targets = sorted(df_target[target].unique())
print(
    "% of unique values in the target column:",
    100 * len(unique_targets) / len(idx_train),
)

# Following the "Post-processing trick to improve CV & LB score" discussion (credited to siukeitin), we observe that only about 5% of the target values in the training samples are unique. Because of this, even though the target column is of dtype float64, we shall round our predictions to the closest value that appears in the set of unique values.

# some helper functions


# projects the number n onto its nearest value in the unique_targets list.
def force(n):
    loc = bisect.bisect_left(unique_targets, n)
    if loc == 0:
        return unique_targets[0]
    if loc == len(unique_targets):
        return unique_targets[-1]
    return (
        unique_targets[loc]
        if abs(unique_targets[loc] - n) < abs(unique_targets[loc - 1] - n)
        else unique_targets[loc - 1]
    )


force_np = np.vectorize(force)


# computes the mean absolute error of y_pred relative to y_true, on the original target scale
def mae(y_true, y_pred, **kwargs):
    n = len(y_true)
    y_true = scaler_target.inverse_transform(y_true.reshape(n, 1))
    y_pred = force_np(scaler_target.inverse_transform(y_pred.reshape(n, 1)))
    return mean_absolute_error(y_true, y_pred)


mae_scorer = make_scorer(mae, greater_is_better=False)  # my mean absolute error scorer


# Creates a sorted list of n equally spaced numbers starting from a and ending near b.
# If the integer flag is set to True, the entries of the output list are rounded to their nearest integers.
def interval(a, b, n, integer=False):
    d = (b - a) / n
    ans = (
        set()
    )  # a set is used because rounding to integers may make some entries coincide
    i = a
    while i < b:
        ans.add(round(i) if integer else i)
        i += d
    return sorted(list(ans))


# I shall use `CatBoostRegressor`, `XGBRegressor`, and `HistGradientBoostingRegressor`, with `RandomizedSearchCV` for hyperparameter tuning in all three cases. The contest is scored at Kaggle using mean absolute error, so this is the metric that will be used for training.
# I don't know how to determine the best hyperparameters to tune or the best ranges to search over. The parameters and ranges below are mostly guesses.
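# As an alternative to the hand-built discrete grids produced by interval(), note that
# RandomizedSearchCV also accepts continuous scipy.stats distributions (anything with an
# .rvs() method). A hedged sketch of what such a search space could look like; the
# ranges here are still guesses, just like the ones below:
from scipy.stats import randint, uniform

example_distributions = {
    "learning_rate": uniform(loc=0.001, scale=0.099),  # samples from [0.001, 0.1]
    "depth": randint(2, 10),  # integers 2..9
    "subsample": uniform(loc=0.3, scale=0.7),  # samples from [0.3, 1.0]
}
# These could be passed as param_distributions=example_distributions to any of the
# RandomizedSearchCV calls below instead of the interval()-based dictionaries.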
# ## CatBoost

cbr = CatBoostRegressor(
    loss_function="MAE",
    verbose=False,
    subsample=0.818,
    learning_rate=0.023,
    l2_leaf_reg=3.664,
    iterations=1280,
    depth=5,
    colsample_bylevel=0.426,
    border_count=300,
)

# uncomment the following block to run the randomized hyperparameter search
"""
# the search space for the hyperparameters
random_grid = {'learning_rate': interval(.001, .1, 50),
               'depth': range(2,10),
               'l2_leaf_reg': interval(.1, 10, 50),
               'iterations': interval(500, 1500, 50, integer=True),
               'border_count': interval(100, 500, 50, integer=True),
               'subsample': interval(.3, 1, 50),
               'colsample_bylevel': interval(.3, 1, 50)
              }

cbr_random = RandomizedSearchCV(scoring=mae_scorer,
                                estimator = cbr,
                                param_distributions = random_grid,
                                n_iter = 150,
                                cv = 3,
                                #random_state = 31,
                                verbose = 1,
                                n_jobs = -1,
                                refit=True
                               )
cbr_random.fit(xtrain, ytrain.ravel())
cbr = cbr_random.best_estimator_
print('Best parameters from search:', cbr_random.best_params_)
print('Min error from search:', abs(cbr_random.best_score_))
"""

cbr.fit(xtrain, ytrain.ravel())

y_pred_val = force_np(
    scaler_target.inverse_transform(cbr.predict(xval).reshape(len(xval), 1))
)
print(
    "Mean absolute error (validation):",
    mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val),
)

res = (y_pred_val - yval).ravel()
sns.scatterplot(x=yval.ravel(), y=res)
sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r")
plt.show()
print("mean of residuals:", res.mean())
print("skewness of residuals:", skew(res))
print("kurtosis of residuals:", kurtosis(res))

df_pred = df_scaled.loc[idx_pred]
target_pred = force_np(
    scaler_target.inverse_transform(cbr.predict(df_pred).reshape(len(idx_pred), 1))
)
solution = pd.DataFrame(target_pred, columns=[target])
solution["id"] = df_pred.index
solution = solution[["id", target]]
solution.to_csv(out_prefix + "_sol_cbr.csv", index=False)

# Ideally the residuals should be normally distributed around the red line. Instead, I am consistently underestimating the high yields, and this problem persists through all three models. One possible correction is sketched below.
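# A possible (untested) correction for the systematic underestimation of high yields:
# fit a monotone calibration map from the raw validation predictions to the true values
# and apply it before the force_np rounding. This is only a sketch of the idea; fitting
# and scoring the calibrator on the same validation split, as done here for brevity,
# would overstate the benefit, so a proper version needs its own holdout split.
from sklearn.isotonic import IsotonicRegression

raw_val = scaler_target.inverse_transform(
    cbr.predict(xval).reshape(len(xval), 1)
).ravel()
calibrator = IsotonicRegression(out_of_bounds="clip")
calibrator.fit(raw_val, yval.ravel())
y_pred_val_cal = force_np(calibrator.predict(raw_val).reshape(len(xval), 1))
print(
    "Mean absolute error (validation, calibrated):",
    mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val_cal),
)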
# ## XGBoost xgb = XGBRegressor( objective="reg:absoluteerror", n_estimators=228, max_depth=4, min_child_weight=2, max_delta_step=0.4, grow_policy="lossguide", subsample=0.96, learning_rate=0.0316, reg_lambda=1.72, gamma=0.68, alpha=0.88, ) # uncomment the following block to run randomized hyperparameter search """ # the search space for the hyperparameters random_grid = {#'max_depth': range(1,10), 'lambda': interval(0, 2, 50), 'gamma': interval(0, 2, 50), 'alpha': interval(0, 2, 50), 'learning_rate': interval(.01, .1, 25), 'subsample': interval(.5, 1, 50) #'n_estimators': interval(10, 1000, 100, integer=True), #'grow_policy': ['depthwise', 'lossguide'], #'min_child_weight': interval(.0001, 5, 50), #'max_delta_step': interval(0, 2, 50) } xgb_random = RandomizedSearchCV(scoring=mae_scorer, estimator = xgb, param_distributions = random_grid, n_iter = 150, cv = 3, #random_state = 31, verbose = 1, n_jobs = -1, refit=True ) xgb_random.fit(xtrain, ytrain.ravel()) xgb = xgb_random.best_estimator_ print('Best parameters from search:', xgb_random.best_params_) print('Min error from search:', abs(xgb_random.best_score_)) """ xgb.fit(xtrain, ytrain.ravel()) y_pred_val = force_np( scaler_target.inverse_transform(xgb.predict(xval).reshape(len(xval), 1)) ) print( "Mean absolute error (validation):", mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val), ) res = (y_pred_val - yval).ravel() sns.scatterplot(x=yval.ravel(), y=res) sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r") plt.show() print("mean of residuals:", res.mean()) print("skewness of residuals:", skew(res)) print("kurtosis of residuals:", kurtosis(res)) df_pred = df_scaled.loc[idx_pred] target_pred = force_np( scaler_target.inverse_transform(xgb.predict(df_pred).reshape(len(idx_pred), 1)) ) solution = pd.DataFrame(target_pred, columns=[target]) solution["id"] = df_pred.index solution = solution[["id", target]] solution.to_csv(out_prefix + "_sol_xgb.csv", index=False) # ## HistGradientBoost hgb = HistGradientBoostingRegressor( loss="absolute_error", early_stopping=False, min_samples_leaf=19, max_leaf_nodes=10, max_iter=1815, max_depth=8, max_bins=239, learning_rate=0.013, l2_regularization=0.96, ) # uncomment the following block to run randomized hyperparameter search """ # the search space for the hyperparameters random_grid = {'l2_regularization': interval(0, 1, 50), #'early_stopping': [True, False], 'learning_rate': interval(.001, .1, 50), 'max_iter': interval(100, 5000, 100, integer=True), 'max_depth': interval(1, 50, 20, integer=True), 'max_bins': interval(50, 255, 50, integer=True), 'min_samples_leaf': interval(10, 100, 20, integer=True), 'max_leaf_nodes': interval(10, 100, 20, integer=True) } hgb_random = RandomizedSearchCV(scoring = mae_scorer, estimator = hgb, param_distributions = random_grid, n_iter = 150, cv = 3, verbose = 1, #random_state = 19, n_jobs = -1, refit=True ) hgb_random.fit(xtrain, ytrain.ravel()) hgb = hgb_random.best_estimator_ print('Best parameters from search:', hgb_random.best_params_) print('Min error from search:', abs(hgb_random.best_score_)) """ hgb.fit(xtrain, ytrain.ravel()) y_pred_val = force_np( scaler_target.inverse_transform(hgb.predict(xval).reshape(len(xval), 1)) ) print( "Mean absolute error (validation):", mean_absolute_error(yval.reshape(len(xval), 1), y_pred_val), ) res = (y_pred_val - yval).ravel() sns.scatterplot(x=yval.ravel(), y=res) sns.lineplot(x=[min(yval.ravel()), max(yval.ravel())], y=0, color="r") plt.show() print("mean of residuals:", res.mean()) 
print("skewness of residuals:", skew(res)) print("kurtosis of residuals:", kurtosis(res)) df_pred = df_scaled.loc[idx_pred] target_pred = force_np( scaler_target.inverse_transform(hgb.predict(df_pred).reshape(len(idx_pred), 1)) ) solution = pd.DataFrame(target_pred, columns=[target]) solution["id"] = df_pred.index solution = solution[["id", target]] solution.to_csv(out_prefix + "_sol_hgb.csv", index=False) # ## Averaging over Models # The three models perform quite similarly on the training set. So I am going to take the average of their outcomes. target_pred = force_np( scaler_target.inverse_transform( ( cbr.predict(df_pred).reshape(len(idx_pred), 1) + xgb.predict(df_pred).reshape(len(idx_pred), 1) + hgb.predict(df_pred).reshape(len(idx_pred), 1) ) / 3 ) ) solution = pd.DataFrame(target_pred, columns=[target]) solution["id"] = df_pred.index solution = solution[["id", target]] solution.to_csv(out_prefix + "_sol_hgb+xgb+cbr.csv", index=False)