file_id                      stringlengths   5–9
content                      stringlengths   100–5.25M
local_path                   stringlengths   66–70
kaggle_dataset_name          stringlengths   3–50
kaggle_dataset_owner         stringlengths   3–20
kversion                     stringlengths   497–763
kversion_datasetsources      stringlengths   71–5.46k
dataset_versions             stringlengths   338–235k
datasets                     stringlengths   334–371
users                        stringlengths   111–264
script                       stringlengths   100–5.25M
df_info                      stringlengths   0–4.87M
has_data_info                bool            2 classes
nb_filenames                 int64           0–370
retreived_data_description   stringlengths   0–4.44M
script_nb_tokens             int64           25–663k
upvotes                      int64           0–1.65k
tokens_description           int64           25–663k
tokens_script                int64           25–663k
129182739
<jupyter_start><jupyter_text>Cancer Rates Explanation of field attributes: Colorectal Cancer - Cancer that develops in the colon (the longest part of the large intestine) and/or the rectum (the last several inches of the large intestine). This is a rate per 100,000. Lung Cancer – Cancer that forms in tissues of the lung, usually in the cells lining air passages. This is a rate per 100,000. Breast Cancer – Cancer that forms in tissues of the breast. This is a rate per 100,000. Prostate Cancer – Cancer that forms in tissues of the prostate. This is a rate per 100,000. Urinary System Cancer – Cancer that forms in the organs of the body that produce and discharge urine. These include the kidneys, ureters, bladder, and urethra. This is a rate per 100,000. All Cancer – All cancers including, but not limited to: colorectal cancer, lung cancer, breast cancer, prostate cancer, and cancer of the urinary system. This is a rate per 100,000. Kaggle dataset identifier: cancer-rates <jupyter_script># # Cancer Rates for Lake County Illinois. # ![save.jpg](attachment:d2213f69-4a74-4d12-81c5-8e3c4c24d769.jpg) # **This dataset contains cancer statistics for several different parts of the state of Illinois. # For a more detailed analysis, the ZIP code column is especially useful: a ZIP code encodes a lot of information that can be extracted with libraries such as zipcodes.** # import important libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import matplotlib import folium # ### First step: Import the dataset Data = pd.read_csv("../input/cancer-rates/Cancer_Rates.csv") df = pd.DataFrame(Data) df # ### I am going to analyze the cancer columns, so I will drop the ID and shape columns. This doesn't mean these columns are unimportant; they simply have no place in the analysis that follows. # df.drop(columns=["FID", "SHAPE_Length", "SHAPE_Area"], inplace=True) df # # **Let's use the zip codes** import zipcodes as zcode list_zipcode = list(df["ZIP"]) # ## **With the following code, we can get a lot of information from each zip code, such as the name of the city, its geographical location, etc.** for i in list_zipcode: print(zcode.matching(str(i))) # # **Now how can we use this information in the analysis?** city = [] lat = [] long = [] for x in list_zipcode: my_city_county = zcode.matching(str(x)) if len(my_city_county) == 1: County = my_city_county[0].get("city") Lat = my_city_county[0].get("lat") Long = my_city_county[0].get("long") else: County = "Not found" Lat = np.NaN Long = np.NaN city.append(County) lat.append(Lat) long.append(Long) df["Place"] = city df["Latitude"] = lat df["Longitude"] = long def highlight_cols(s): color = "yellow" return "background-color: %s" % color df.style.applymap(highlight_cols, subset=pd.IndexSlice[:, ["Place"]]) # # **As you can see, I extracted the city names from all that information and added them to the data frame.** df = df.astype({"Latitude": float, "Longitude": float}) df["Place"].value_counts() # **There are 2 duplicate names in the data frame that can be analyzed separately** df.loc[21, "Place"] = "Waukegan1" df.loc[22, "Place"] = "Waukegan2" df.style.applymap(highlight_cols, subset=pd.IndexSlice[:, ["Place"]]) # # **Let's see where Illinois is on the map** # ![Screenshot (67).png](attachment:51f94e94-ed40-4d5d-b2c8-5b1ccfb416dc.png) il_map = folium.Map(location=[40, -89.5], zoom_start=7) il_map # # **Now we can draw useful plots with the help of the information obtained from the zipcodes library** plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Colorectal", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Colorectal", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Colorectal cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Lung_Bronc", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Lung Bronc", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Lung_Bronc cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Breast_Can", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Breast Can", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Breast_Can cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Prostate_C", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Prostate C", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Prostate_C cancer", fontsize=20)
ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Urinary_Sy", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Urinary Sy", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Urinary_Sy cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(19, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="All_Cancer", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("All Cancer", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs All Cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.All_Cancer, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 9}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs All cancers", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Colorectal, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Colorectal", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Lung_Bronc, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Lung_Bronc", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Breast_Can, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Breast_Can", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Prostate_C, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Prostate_C", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Urinary_Sy, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Urinary_Sy", fontsize=20) plt.show()
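The ZIP enrichment loop above can also be written as a single pandas apply, which keeps the city/latitude/longitude lookups and the row order in one place. A minimal sketch, assuming the df and its ZIP column from the notebook (the notebook shows zipcodes.matching returning dicts with "city", "lat", and "long" entries):

import numpy as np
import pandas as pd
import zipcodes as zcode

def lookup_zip(z):
    # Return (city, lat, long) for a ZIP code, or placeholders when the
    # lookup is ambiguous or empty.
    matches = zcode.matching(str(z))
    if len(matches) == 1:
        m = matches[0]
        return m.get("city"), float(m.get("lat")), float(m.get("long"))
    return "Not found", np.nan, np.nan

# One result row per ZIP, in the original row order.
df[["Place", "Latitude", "Longitude"]] = df["ZIP"].apply(
    lambda z: pd.Series(lookup_zip(z))
)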
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/182/129182739.ipynb
cancer-rates
imtkaggleteam
[{"Id": 129182739, "ScriptId": 38404040, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11338183, "CreationDate": "05/11/2023 15:47:55", "VersionNumber": 1.0, "Title": "Cancer Rate with Zipcode Analysis", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 276.0, "LinesInsertedFromPrevious": 276.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 77}]
[{"Id": 185003676, "KernelVersionId": 129182739, "SourceDatasetVersionId": 5603958}]
[{"Id": 5603958, "DatasetId": 3223644, "DatasourceVersionId": 5679010, "CreatorUserId": 11338183, "LicenseName": "Other (specified in description)", "CreationDate": "05/04/2023 20:16:49", "VersionNumber": 1.0, "Title": "Cancer Rates", "Slug": "cancer-rates", "Subtitle": "Cancer Rates for Lake County Illinois", "Description": "Explanation of field attributes:\n\nColorectal Cancer - Cancer that develops in the colon (the longest part of the large intestine) and/or the rectum (the last several inches of the large intestine). This is a rate per 100,000.\n\nLung Cancer \u2013 Cancer that forms in tissues of the lung, usually in the cells lining air passages. This is a rate per 100,000.\n\nBreast Cancer \u2013 Cancer that forms in tissues of the breast. This is a rate per 100,000. \n\nProstate Cancer \u2013 Cancer that forms in tissues of the prostate. This is a rate per 100,000.\n\nUrinary System Cancer \u2013 Cancer that forms in the organs of the body that produce and discharge urine. These include the kidneys, ureters, bladder, and urethra. This is a rate per 100,000.\n\nAll Cancer \u2013 All cancers including, but not limited to: colorectal cancer, lung cancer, breast cancer, prostate cancer, and cancer of the urinary system. This is a rate per 100,000.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3223644, "CreatorUserId": 11338183, "OwnerUserId": 11338183.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5603958.0, "CurrentDatasourceVersionId": 5679010.0, "ForumId": 3288624, "Type": 2, "CreationDate": "05/04/2023 20:16:49", "LastActivityDate": "05/04/2023", "TotalViews": 1745, "TotalDownloads": 254, "TotalVotes": 66, "TotalKernels": 2}]
[{"Id": 11338183, "UserName": "imtkaggleteam", "DisplayName": "Mohamadreza Momeni", "RegisterDate": "08/19/2022", "PerformanceTier": 4}]
# # Cancer Rates for Lake County Illinois. # ![save.jpg](attachment:d2213f69-4a74-4d12-81c5-8e3c4c24d769.jpg) # **This dataset contains cancer statistics for several different parts of the state of Illinois. # For a more detailed analysis, the ZIP code column is especially useful: a ZIP code encodes a lot of information that can be extracted with libraries such as zipcodes.** # import important libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import matplotlib import folium # ### First step: Import the dataset Data = pd.read_csv("../input/cancer-rates/Cancer_Rates.csv") df = pd.DataFrame(Data) df # ### I am going to analyze the cancer columns, so I will drop the ID and shape columns. This doesn't mean these columns are unimportant; they simply have no place in the analysis that follows. # df.drop(columns=["FID", "SHAPE_Length", "SHAPE_Area"], inplace=True) df # # **Let's use the zip codes** import zipcodes as zcode list_zipcode = list(df["ZIP"]) # ## **With the following code, we can get a lot of information from each zip code, such as the name of the city, its geographical location, etc.** for i in list_zipcode: print(zcode.matching(str(i))) # # **Now how can we use this information in the analysis?** city = [] lat = [] long = [] for x in list_zipcode: my_city_county = zcode.matching(str(x)) if len(my_city_county) == 1: County = my_city_county[0].get("city") Lat = my_city_county[0].get("lat") Long = my_city_county[0].get("long") else: County = "Not found" Lat = np.NaN Long = np.NaN city.append(County) lat.append(Lat) long.append(Long) df["Place"] = city df["Latitude"] = lat df["Longitude"] = long def highlight_cols(s): color = "yellow" return "background-color: %s" % color df.style.applymap(highlight_cols, subset=pd.IndexSlice[:, ["Place"]]) # # **As you can see, I extracted the city names from all that information and added them to the data frame.** df = df.astype({"Latitude": float, "Longitude": float}) df["Place"].value_counts() # **There are 2 duplicate names in the data frame that can be analyzed separately** df.loc[21, "Place"] = "Waukegan1" df.loc[22, "Place"] = "Waukegan2" df.style.applymap(highlight_cols, subset=pd.IndexSlice[:, ["Place"]]) # # **Let's see where Illinois is on the map** # ![Screenshot (67).png](attachment:51f94e94-ed40-4d5d-b2c8-5b1ccfb416dc.png) il_map = folium.Map(location=[40, -89.5], zoom_start=7) il_map # # **Now we can draw useful plots with the help of the information obtained from the zipcodes library** plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Colorectal", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Colorectal", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Colorectal cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Lung_Bronc", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Lung Bronc", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Lung_Bronc cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Breast_Can", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Breast Can", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Breast_Can cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Prostate_C", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Prostate C", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Prostate_C cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(18, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="Urinary_Sy", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("Urinary Sy", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs Urinary_Sy cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(19, 5), dpi=200) color = [ "black", "red", "green", "orange", "blue", "limegreen", "darkgreen", "royalblue", "navy", ] ax = sns.barplot( data=df, x="Place", y="All_Cancer", capsize=0.4, errcolor=".5", linewidth=3, edgecolor=".4", palette=color, ) plt.xticks(rotation=60, fontsize=15) plt.yticks(fontsize=15) plt.ylabel("All Cancer", fontsize=20) plt.xlabel("Place", fontsize=20) plt.title("Places Vs All Cancer", fontsize=20) ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.All_Cancer, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 9}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs All cancers", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Colorectal, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Colorectal", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Lung_Bronc, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Lung_Bronc", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Breast_Can, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Breast_Can", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Prostate_C, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Prostate_C", fontsize=20) plt.show() plt.figure(figsize=(10, 10), dpi=200) plt.pie( df.Urinary_Sy, labels=df.Place, autopct="%1.1f%%", textprops={"fontsize": 8}, colors=sns.color_palette("Set3"), explode=[0.05] * 27, ) hole = plt.Circle((0, 0), 0.65, facecolor="white") plt.gcf().gca().add_artist(hole) plt.title("Places vs Urinary_Sy", fontsize=20) plt.show()
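The six bar-chart cells in this script differ only in the column being plotted, so they can be collapsed into a loop. A minimal sketch, assuming the df and the color palette defined in the script above:

import matplotlib.pyplot as plt
import seaborn as sns

cancer_cols = ["Colorectal", "Lung_Bronc", "Breast_Can", "Prostate_C", "Urinary_Sy", "All_Cancer"]
for col in cancer_cols:
    plt.figure(figsize=(18, 5), dpi=200)
    ax = sns.barplot(data=df, x="Place", y=col, linewidth=3, edgecolor=".4", palette=color)
    plt.xticks(rotation=60, fontsize=15)
    plt.yticks(fontsize=15)
    plt.ylabel(col, fontsize=20)
    plt.xlabel("Place", fontsize=20)
    plt.title(f"Places Vs {col} cancer", fontsize=20)
    ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12)
    plt.show()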
false
1
4,044
77
4,364
4,044
129061518
import matplotlib.pyplot as plt import numpy as np import sklearn # # Loading the breast cancer data from sklearn.datasets import load_breast_cancer bc_data = load_breast_cancer() print(bc_data.feature_names) X = bc_data.data Y = bc_data.target # # Splitting the data into training and test sets from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size=0.25, random_state=0, stratify=Y ) # # Creating a LogisticRegression classifier from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(solver="lbfgs", max_iter=2000) # # Training the model on the training data classifier.fit(X_train, Y_train) # # Evaluating the model on the test data prediction = classifier.predict(X_test) # # The accuracy metric np.mean(prediction == Y_test) # # The score function classifier.score(X_test, Y_test) # # Metrics report from sklearn.metrics import classification_report y_true = Y_test y_pred = prediction target_names = ["class 0", "class 1"] print( classification_report( y_true, y_pred, labels=None, target_names=target_names, sample_weight=None, digits=2, output_dict=False, zero_division="warn", ) ) # # Decision tree classifier from sklearn import tree dtc = tree.DecisionTreeClassifier() dtc.fit(X_train, Y_train) pred_dtc = dtc.predict(X_test) np.mean(pred_dtc == Y_test) # # Naive Bayes classifier from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, Y_train) pred_gnb = gnb.predict(X_test) np.mean(pred_gnb == Y_test) # # K-nearest neighbors classifier from sklearn.neighbors import KNeighborsClassifier neigh = KNeighborsClassifier(n_neighbors=3) neigh.fit(X_train, Y_train) pred_neigh = neigh.predict(X_test) np.mean(pred_neigh == Y_test) # # Support vector machine classifier from sklearn import svm supp = svm.SVC() supp.fit(X_train, Y_train) pred_supp = supp.predict(X_test) np.mean(pred_supp == Y_test) # # Multilayer perceptron classification from sklearn.neural_network import MLPClassifier mlp = MLPClassifier( solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, max_iter=3000 ) mlp.fit(X_train, Y_train) pred_mlp = mlp.predict(X_test) np.mean(pred_mlp == Y_test) # # Random forest classifier from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(max_depth=2, random_state=0) rfc.fit(X_train, Y_train) pred_rfc = rfc.predict(X_test) np.mean(pred_rfc == Y_test) # # Cross-validation for the multilayer perceptron (MLP) classifier from sklearn.model_selection import cross_val_score scores = cross_val_score(mlp, X, Y, cv=6) print(scores) print( "%0.2f accuracy with a standard deviation of %0.2f" % (scores.mean(), scores.std()) )
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/061/129061518.ipynb
null
null
[{"Id": 129061518, "ScriptId": 38366062, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15036156, "CreationDate": "05/10/2023 16:56:57", "VersionNumber": 1.0, "Title": "AI_classification", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 114.0, "LinesInsertedFromPrevious": 114.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import matplotlib.pyplot as plt import numpy as np import sklearn # # Loading the breast cancer data from sklearn.datasets import load_breast_cancer bc_data = load_breast_cancer() print(bc_data.feature_names) X = bc_data.data Y = bc_data.target # # Splitting the data into training and test sets from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size=0.25, random_state=0, stratify=Y ) # # Creating a LogisticRegression classifier from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(solver="lbfgs", max_iter=2000) # # Training the model on the training data classifier.fit(X_train, Y_train) # # Evaluating the model on the test data prediction = classifier.predict(X_test) # # The accuracy metric np.mean(prediction == Y_test) # # The score function classifier.score(X_test, Y_test) # # Metrics report from sklearn.metrics import classification_report y_true = Y_test y_pred = prediction target_names = ["class 0", "class 1"] print( classification_report( y_true, y_pred, labels=None, target_names=target_names, sample_weight=None, digits=2, output_dict=False, zero_division="warn", ) ) # # Decision tree classifier from sklearn import tree dtc = tree.DecisionTreeClassifier() dtc.fit(X_train, Y_train) pred_dtc = dtc.predict(X_test) np.mean(pred_dtc == Y_test) # # Naive Bayes classifier from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, Y_train) pred_gnb = gnb.predict(X_test) np.mean(pred_gnb == Y_test) # # K-nearest neighbors classifier from sklearn.neighbors import KNeighborsClassifier neigh = KNeighborsClassifier(n_neighbors=3) neigh.fit(X_train, Y_train) pred_neigh = neigh.predict(X_test) np.mean(pred_neigh == Y_test) # # Support vector machine classifier from sklearn import svm supp = svm.SVC() supp.fit(X_train, Y_train) pred_supp = supp.predict(X_test) np.mean(pred_supp == Y_test) # # Multilayer perceptron classification from sklearn.neural_network import MLPClassifier mlp = MLPClassifier( solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, max_iter=3000 ) mlp.fit(X_train, Y_train) pred_mlp = mlp.predict(X_test) np.mean(pred_mlp == Y_test) # # Random forest classifier from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(max_depth=2, random_state=0) rfc.fit(X_train, Y_train) pred_rfc = rfc.predict(X_test) np.mean(pred_rfc == Y_test) # # Cross-validation for the multilayer perceptron (MLP) classifier from sklearn.model_selection import cross_val_score scores = cross_val_score(mlp, X, Y, cv=6) print(scores) print( "%0.2f accuracy with a standard deviation of %0.2f" % (scores.mean(), scores.std()) )
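Every classifier in this script follows the same fit/predict/score pattern, so the comparison can be condensed into a loop. A minimal sketch, assuming the X_train/X_test/Y_train/Y_test split from the script above and reusing its model configurations:

from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier

models = {
    "logistic regression": LogisticRegression(solver="lbfgs", max_iter=2000),
    "decision tree": DecisionTreeClassifier(),
    "naive Bayes": GaussianNB(),
    "k-nearest neighbors": KNeighborsClassifier(n_neighbors=3),
    "SVM": SVC(),
    "MLP": MLPClassifier(solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, max_iter=3000),
    "random forest": RandomForestClassifier(max_depth=2, random_state=0),
}
# Accuracy on the held-out test set for each model.
for name, model in models.items():
    model.fit(X_train, Y_train)
    print(f"{name}: {model.score(X_test, Y_test):.3f}")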
false
0
1,033
0
1,033
1,033
129061060
import pandas as pd import numpy as np train = pd.read_csv("/kaggle/input/playground-series-s3e13/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e13/test.csv") train["prognosis"].value_counts() # # **Mapping** prognosis_dict = { "Malaria": 1, "Lyme_disease": 2, "Plague": 3, "Zika": 4, "Yellow_Fever": 5, "Dengue": 6, "Chikungunya": 7, "Rift_Valley_fever": 8, "Tungiasis": 9, "Japanese_encephalitis": 10, "West_Nile_fever": 11, } train["prognosis"] = train["prognosis"].map(prognosis_dict)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/061/129061060.ipynb
null
null
[{"Id": 129061060, "ScriptId": 38282270, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13198683, "CreationDate": "05/10/2023 16:52:34", "VersionNumber": 1.0, "Title": "Playground-series-3-13", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 18.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
import pandas as pd import numpy as np train = pd.read_csv("/kaggle/input/playground-series-s3e13/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e13/test.csv") train["prognosis"].value_counts() # # **Mapping** prognosis_dict = { "Malaria": 1, "Lyme_disease": 2, "Plague": 3, "Zika": 4, "Yellow_Fever": 5, "Dengue": 6, "Chikungunya": 7, "Rift_Valley_fever": 8, "Tungiasis": 9, "Japanese_encephalitis": 10, "West_Nile_fever": 11, } train["prognosis"] = train["prognosis"].map(prognosis_dict)
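The mapping above turns the prognosis strings into integers for modeling; for a submission, numeric predictions usually have to be mapped back to the original label names. A minimal sketch of the inverse mapping (the preds values are hypothetical):

# Invert the label -> id mapping into id -> label.
inverse_prognosis = {v: k for k, v in prognosis_dict.items()}
preds = [1, 4, 11]  # hypothetical numeric model outputs
print([inverse_prognosis[p] for p in preds])  # ['Malaria', 'Zika', 'West_Nile_fever']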
false
0
234
1
234
234
129061498
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Imports # ## Import COCO Dataset from FastAI from fastai.data.external import untar_data, URLs coco_path = untar_data(URLs.COCO_SAMPLE) coco_path = str(coco_path) + "/train_sample" # ## Import Libraries """ file: dependencies.py author: @vincit0re brief: This file contains the dependencies for the application. date: 2023-05-05 """ # All import statements and libraries import os import glob import time import numpy as np from PIL import Image from pathlib import Path from tqdm.notebook import tqdm import matplotlib.pyplot as plt from skimage.color import rgb2lab, lab2rgb import torch from torch import nn, optim from torch.utils.data import Dataset, DataLoader from torchvision import transforms from torchvision.utils import make_grid import copy use_gpu = True device = torch.device("cuda" if (torch.cuda.is_available() and use_gpu) else "cpu") print(f"Using Device: {device}") import warnings warnings.filterwarnings("ignore") # # Colorization # ## Hyperparameters # Hyperparameters class Hyperparameters: """This contains the hyperparameters for the application.""" _SIZE = 256 _DATA_DIR = coco_path _BATCH_SIZE = 16 _N_EPOCHS = 10 _SAVE_PATH = "basemodel.pt" # ## Dataset Images def load_images(path): """This function loads the images from the path.""" paths = glob.glob(path + "/*.jpg") # Grabbing all the image file names np.random.seed(123) paths_subset = np.random.choice( paths, 10_000, replace=False ) # choosing 10000 images randomly rand_idxs = np.random.permutation(10_000) train_idxs = rand_idxs[:8000] # choosing the first 8000 as training set val_idxs = rand_idxs[8000:] # choosing last 2000 as validation set train_paths = paths_subset[train_idxs] val_paths = paths_subset[val_idxs] return train_paths, val_paths train_imgs_path, val_imgs_path = load_images(Hyperparameters._DATA_DIR) print(f"Number of Training Images: {len(train_imgs_path)}") print(f"Number of Validation Images: {len(val_imgs_path)}") def plot_sample_images(images_path): images_path = images_path[:16] fig, axes = plt.subplots(4, 4, figsize=(10, 10)) for ax, img_path in zip(axes.flatten(), images_path): ax.imshow(Image.open(img_path)) ax.axis("off") plt.suptitle("Sample Images") plt.tight_layout() plt.show() plot_sample_images(train_imgs_path) # ## Dataset Class # dataset class class ColorizationDataset(Dataset): def __init__(self, paths, split="train", size=256): self.SIZE = size if split == "train": self.transforms = transforms.Compose( [ transforms.Resize((self.SIZE, self.SIZE), Image.BICUBIC), transforms.RandomHorizontalFlip(), # A little data augmentation!
] ) elif split == "val": self.transforms = transforms.Resize((self.SIZE, self.SIZE), Image.BICUBIC) self.split = split self.size = self.SIZE self.paths = paths def __getitem__(self, idx): img = Image.open(self.paths[idx]).convert("RGB") img = self.transforms(img) img = np.array(img) img_lab = rgb2lab(img).astype("float32") # Converting RGB to L*a*b img_lab = transforms.ToTensor()(img_lab) L = img_lab[[0], ...] / 50.0 - 1.0 # Between -1 and 1 ab = img_lab[[1, 2], ...] / 110.0 # Between -1 and 1 return {"L": L, "ab": ab} def __len__(self): return len(self.paths) # A handy function to make our dataloaders def make_dataloaders(batch_size=16, n_workers=2, pin_memory=True, **kwargs): dataset = ColorizationDataset(size=Hyperparameters._SIZE, **kwargs) dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=n_workers, pin_memory=pin_memory ) return dataloader # get train, val dataloaders def get_train_val_dataloaders(train_paths, val_paths, batch_size=64, shuffle=True): """This function returns the train, validation and test dataloaders.""" train_dataloader = make_dataloaders( batch_size=batch_size, paths=train_paths, split="train" ) val_dataloader = make_dataloaders( batch_size=batch_size, paths=val_paths, split="val" ) return train_dataloader, val_dataloader train_loader, val_loader = get_train_val_dataloaders( train_paths=train_imgs_path, val_paths=val_imgs_path, batch_size=16, shuffle=True ) print(f"Train Data: {len(train_loader.dataset)} ({len(train_loader)} batches)") print(f"Validation Data: {len(val_loader.dataset)} ({len(val_loader)} batches)") for batch in train_loader: print(batch["L"].shape) print(batch["ab"].shape) break # ## U-Net # block for UNet class UnetBlock(nn.Module): """This is the block for the UNet. Args: nf (int): Number of filters. ni (int): Number of input channels. submodule (nn.Module): Submodule. input_c (int): Number of input channels. dropout (bool): Dropout. innermost (bool): Innermost. outermost (bool): Outermost. """ def __init__( self, nf, ni, submodule=None, input_c=None, dropout=False, innermost=False, outermost=False, ): super().__init__() self.outermost = outermost if input_c is None: input_c = nf downconv = nn.Conv2d( input_c, ni, kernel_size=4, stride=2, padding=1, bias=False ) downrelu = nn.LeakyReLU(0.2, True) downnorm = nn.BatchNorm2d(ni) uprelu = nn.ReLU(True) upnorm = nn.BatchNorm2d(nf) if outermost: upconv = nn.ConvTranspose2d(ni * 2, nf, kernel_size=4, stride=2, padding=1) down = [downconv] up = [uprelu, upconv, nn.Tanh()] model = down + [submodule] + up elif innermost: upconv = nn.ConvTranspose2d( ni, nf, kernel_size=4, stride=2, padding=1, bias=False ) down = [downrelu, downconv] up = [uprelu, upconv, upnorm] model = down + up else: upconv = nn.ConvTranspose2d( ni * 2, nf, kernel_size=4, stride=2, padding=1, bias=False ) down = [downrelu, downconv, downnorm] up = [uprelu, upconv, upnorm] if dropout: up += [nn.Dropout(0.5)] model = down + [submodule] + up self.model = nn.Sequential(*model) # forward def forward(self, x): # print(x.shape) if self.outermost: return self.model(x) else: return torch.cat([x, self.model(x)], 1) # class for UNet class Unet(nn.Module): """This is the UNet class. Args: input_c (int): Number of input channels. output_c (int): Number of output channels. n_down (int): Number of down samples. num_filters (int): Number of filters. 
""" def __init__(self, input_c=1, output_c=2, n_down=8, num_filters=64): super().__init__() unet_block = UnetBlock(num_filters * 8, num_filters * 8, innermost=True) for _ in range(n_down - 5): unet_block = UnetBlock( num_filters * 8, num_filters * 8, submodule=unet_block, dropout=True ) out_filters = num_filters * 8 for _ in range(3): unet_block = UnetBlock(out_filters // 2, out_filters, submodule=unet_block) out_filters //= 2 self.model = UnetBlock( output_c, out_filters, input_c=input_c, submodule=unet_block, outermost=True ) def forward(self, x): return self.model(x) # ## Discriminator # discriminator class class PatchDiscriminator(nn.Module): """This is the discriminator class. Args: input_c (int): Number of input channels. num_filters (int): Number of filters. n_down (int): Number of down samples. """ def __init__(self, input_c, num_filters=64, n_down=3): super().__init__() model = [self.get_layers(input_c, num_filters, norm=False)] model += [ self.get_layers( num_filters * 2**i, num_filters * 2 ** (i + 1), s=1 if i == (n_down - 1) else 2, ) for i in range(n_down) ] # the 'if' statement is taking care of not using # stride of 2 for the last block in this loop # Make sure to not use normalization or model += [ self.get_layers(num_filters * 2**n_down, 1, s=1, norm=False, act=False) ] # activation for the last layer of the model self.model = nn.Sequential(*model) # when needing to make some repetitive blocks of layers, def get_layers(self, ni, nf, k=4, s=2, p=1, norm=True, act=True): # it's always helpful to make a separate method for that purpose layers = [nn.Conv2d(ni, nf, k, s, p, bias=not norm)] if norm: layers += [nn.BatchNorm2d(nf)] if act: layers += [nn.LeakyReLU(0.2, True)] return nn.Sequential(*layers) def forward(self, x): return self.model(x) # ## GAN Loss # gan loss class GANLoss(nn.Module): """This is the GAN loss. Args: gan_mode (str): GAN mode. real_label (float): Real label. fake_label (float): Fake label. """ def __init__(self, gan_mode="vanilla", real_label=1.0, fake_label=0.0): super().__init__() self.register_buffer("real_label", torch.tensor(real_label)) self.register_buffer("fake_label", torch.tensor(fake_label)) if gan_mode == "vanilla": self.loss = nn.BCEWithLogitsLoss() elif gan_mode == "lsgan": self.loss = nn.MSELoss() def get_labels(self, preds, target_is_real): if target_is_real: labels = self.real_label else: labels = self.fake_label return labels.expand_as(preds) def __call__(self, preds, target_is_real): labels = self.get_labels(preds, target_is_real) loss = self.loss(preds, labels) return loss # ## Model Initialization # weights initialization def init_weights(net, init="norm", gain=0.02): def init_func(m): classname = m.__class__.__name__ if hasattr(m, "weight") and "Conv" in classname: if init == "norm": nn.init.normal_(m.weight.data, mean=0.0, std=gain) elif init == "xavier": nn.init.xavier_normal_(m.weight.data, gain=gain) elif init == "kaiming": nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in") if hasattr(m, "bias") and m.bias is not None: nn.init.constant_(m.bias.data, 0.0) elif "BatchNorm2d" in classname: nn.init.normal_(m.weight.data, 1.0, gain) nn.init.constant_(m.bias.data, 0.0) net.apply(init_func) print(f"model initialized with {init} initialization") return net # model initialization def init_model(model, device): model = model.to(device) model = init_weights(model) return model # # Main Model # main model class MainModel(nn.Module): """This is the main model class. Args: net_G (nn.Module): Generator network. 
lr_G (float): Learning rate for the generator. lr_D (float): Learning rate for the discriminator. beta1 (float): Beta1 for Adam optimizer. beta2 (float): Beta2 for Adam optimizer. lambda_L1 (float): Weight for L1 loss. """ def __init__( self, net_G=None, lr_G=2e-4, lr_D=2e-4, beta1=0.5, beta2=0.999, lambda_L1=100.0 ): super().__init__() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.lambda_L1 = lambda_L1 if net_G is None: self.net_G = init_model( Unet(input_c=1, output_c=2, n_down=8, num_filters=64), self.device ) else: self.net_G = net_G.to(self.device) self.net_D = init_model( PatchDiscriminator(input_c=3, n_down=3, num_filters=64), self.device ) self.GANcriterion = GANLoss(gan_mode="vanilla").to(self.device) self.L1criterion = nn.L1Loss() self.opt_G = optim.Adam(self.net_G.parameters(), lr=lr_G, betas=(beta1, beta2)) self.opt_D = optim.Adam(self.net_D.parameters(), lr=lr_D, betas=(beta1, beta2)) def set_requires_grad(self, model, requires_grad=True): for p in model.parameters(): p.requires_grad = requires_grad def setup_input(self, data): self.L = data["L"].to(self.device) self.ab = data["ab"].to(self.device) def forward(self): self.fake_color = self.net_G(self.L) def backward_D(self): fake_image = torch.cat([self.L, self.fake_color], dim=1) fake_preds = self.net_D(fake_image.detach()) self.loss_D_fake = self.GANcriterion(fake_preds, False) real_image = torch.cat([self.L, self.ab], dim=1) real_preds = self.net_D(real_image) self.loss_D_real = self.GANcriterion(real_preds, True) self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 self.loss_D.backward() def backward_G(self): fake_image = torch.cat([self.L, self.fake_color], dim=1) fake_preds = self.net_D(fake_image) self.loss_G_GAN = self.GANcriterion(fake_preds, True) self.loss_G_L1 = self.L1criterion(self.fake_color, self.ab) * self.lambda_L1 self.loss_G = self.loss_G_GAN + self.loss_G_L1 self.loss_G.backward() def optimize(self): self.forward() self.net_D.train() self.set_requires_grad(self.net_D, True) self.opt_D.zero_grad() self.backward_D() self.opt_D.step() self.net_G.train() self.set_requires_grad(self.net_D, False) self.opt_G.zero_grad() self.backward_G() self.opt_G.step() # ## Metrics # average meter class for loss class AverageMeter: """This class is used to keep track of loss and other metrics.""" def __init__(self): self.reset() def reset(self): self.count, self.avg, self.sum = [0.0] * 3 def update(self, val, count=1): self.count += count self.sum += count * val self.avg = self.sum / self.count # create loss meters def create_loss_meters(): loss_D_fake = AverageMeter() loss_D_real = AverageMeter() loss_D = AverageMeter() loss_G_GAN = AverageMeter() loss_G_L1 = AverageMeter() loss_G = AverageMeter() return { "loss_D_fake": loss_D_fake, "loss_D_real": loss_D_real, "loss_D": loss_D, "loss_G_GAN": loss_G_GAN, "loss_G_L1": loss_G_L1, "loss_G": loss_G, } # update loss meters in training def update_losses(model, loss_meter_dict, count): for loss_name, loss_meter in loss_meter_dict.items(): loss = getattr(model, loss_name) loss_meter.update(loss.item(), count=count) # plot losses def lab_to_rgb(L, ab): """ Takes a batch of images """ L = (L + 1.0) * 50.0 ab = ab * 110.0 Lab = torch.cat([L, ab], dim=1).permute(0, 2, 3, 1).cpu().numpy() rgb_imgs = [] for img in Lab: img_rgb = lab2rgb(img) rgb_imgs.append(img_rgb) return np.stack(rgb_imgs, axis=0) # visualize results def visualize(model, data, save=True): model.net_G.eval() with torch.no_grad(): model.setup_input(data) model.forward() 
model.net_G.train() fake_color = model.fake_color.detach() real_color = model.ab L = model.L fake_imgs = lab_to_rgb(L, fake_color) real_imgs = lab_to_rgb(L, real_color) fig = plt.figure(figsize=(15, 8)) for i in range(5): ax = plt.subplot(3, 5, i + 1) ax.imshow(L[i][0].cpu(), cmap="gray") ax.axis("off") ax = plt.subplot(3, 5, i + 1 + 5) ax.imshow(fake_imgs[i]) ax.axis("off") ax = plt.subplot(3, 5, i + 1 + 10) ax.imshow(real_imgs[i]) ax.axis("off") plt.tight_layout() plt.show() if save: fig.savefig(f"colorization_{time.time()}.png") # print results def log_results(loss_meter_dict): for loss_name, loss_meter in loss_meter_dict.items(): print(f"{loss_name}: {loss_meter.avg:.5f}") if not os.path.exists("results.txt"): with open("results.txt", "w") as f: f.write(f"{loss_name}: {loss_meter.avg:.5f}\n") else: with open("results.txt", "a") as f: f.write(f"{loss_name}: {loss_meter.avg:.5f}\n") # ## Train Model def train_model( model, train_dl, val_dl, epochs, display_every=200, save_path="model.pt" ): # getting a fixed validation batch for visualizing the model output after fixed intervals vis_data = next(iter(val_dl)) best_loss = 1e10 best_model = None gan_losses = [] disc_losses = [] for e in range(epochs): # function returning a dictionary of objects to loss_meter_dict = create_loss_meters() i = 0 # log the losses of the complete network for data in tqdm(train_dl): model.setup_input(data) model.optimize() # function updating the log objects update_losses(model, loss_meter_dict, count=data["L"].size(0)) i += 1 if i % display_every == 0: print(f"\nEpoch {e+1}/{epochs}") print(f"Iteration {i}/{len(train_dl)}") # function to print out the losses log_results(loss_meter_dict) # function displaying the model's outputs on the fixed validation batch visualize(model, vis_data, save=False) # save losses gan_losses.append(loss_meter_dict["loss_G_GAN"].avg) disc_losses.append(loss_meter_dict["loss_D"].avg) # save model after every epoch if loss_meter_dict["loss_G"].avg < best_loss: best_loss = loss_meter_dict["loss_G"].avg best_model = model torch.save(best_model, save_path) return best_model, (gan_losses, disc_losses) # ### Training model = MainModel() trained_model, losses = train_model( model, train_loader, val_loader, epochs=Hyperparameters._N_EPOCHS, save_path=Hyperparameters._SAVE_PATH, ) from fastai.vision.learner import create_body from torchvision.models.resnet import resnet18 from fastai.vision.models.unet import DynamicUnet def build_res_unet(n_input=1, n_output=2, size=256): # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") body = create_body(resnet18, pretrained=True, n_in=n_input, cut=-2) net_G = DynamicUnet(body, n_output, (size, size)).to(device) return net_G def pretrain_generator(net_G, train_dl, opt, criterion, epochs): for e in range(epochs): loss_meter = AverageMeter() for data in tqdm(train_dl): L, ab = data["L"].to(device), data["ab"].to(device) preds = net_G(L) loss = criterion(preds, ab) opt.zero_grad() loss.backward() opt.step() loss_meter.update(loss.item(), L.size(0)) print(f"Epoch {e + 1}/{epochs}") print(f"L1 Loss: {loss_meter.avg:.5f}") net_G = build_res_unet(n_input=1, n_output=2, size=256) opt = optim.Adam(net_G.parameters(), lr=1e-4) criterion = nn.L1Loss() # pretrain_generator(net_G, train_dl, opt, criterion, 20) # net_G = build_res_unet(n_input=1, n_output=2, size=256) # net_G.load_state_dict(torch.load("res18-unet.pt", map_location=device)) # model = MainModel(net_G=net_G) # train_model(model, train_dl, 20)
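Once train_model returns, the generator can be used on its own for inference. A minimal sketch, assuming the trained_model, val_loader, and lab_to_rgb defined in the notebook above:

import torch

# Colorize one validation batch with the trained generator.
batch = next(iter(val_loader))
trained_model.net_G.eval()
with torch.no_grad():
    L = batch["L"].to(trained_model.device)  # grayscale channel, scaled to [-1, 1]
    fake_ab = trained_model.net_G(L)         # predicted a*b* color channels
rgb = lab_to_rgb(L, fake_ab)                 # (batch, H, W, 3) numpy array in [0, 1]
print(rgb.shape)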
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/061/129061498.ipynb
null
null
[{"Id": 129061498, "ScriptId": 38361361, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9286223, "CreationDate": "05/10/2023 16:56:48", "VersionNumber": 1.0, "Title": "DL_Project", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 632.0, "LinesInsertedFromPrevious": 632.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Imports # ## Import COCO Dataset from FastAI from fastai.data.external import untar_data, URLs coco_path = untar_data(URLs.COCO_SAMPLE) coco_path = str(coco_path) + "/train_sample" # ## Import Libraries """ file: dependencies.py author: @vincit0re brief: This file contains the dependencies for the application. date: 2023-05-05 """ # All import statements and libraries import os import glob import time import numpy as np from PIL import Image from pathlib import Path from tqdm.notebook import tqdm import matplotlib.pyplot as plt from skimage.color import rgb2lab, lab2rgb import torch from torch import nn, optim from torch.utils.data import Dataset, DataLoader from torchvision import transforms from torchvision.utils import make_grid import copy use_gpu = True device = torch.device("cuda" if (torch.cuda.is_available() and use_gpu) else "cpu") print(f"Using Device: {device}") import warnings warnings.filterwarnings("ignore") # # Colorization # ## Hyperparameters # Hyperparameters class Hyperparameters: """This contains the hyperparameters for the application.""" _SIZE = 256 _DATA_DIR = coco_path _BATCH_SIZE = 16 _N_EPOCHS = 10 _SAVE_PATH = "basemodel.pt" # ## Dataset Images def load_images(path): """This function loads the images from the path.""" paths = glob.glob(path + "/*.jpg") # Grabbing all the image file names np.random.seed(123) paths_subset = np.random.choice( paths, 10_000, replace=False ) # choosing 10000 images randomly rand_idxs = np.random.permutation(10_000) train_idxs = rand_idxs[:8000] # choosing the first 8000 as training set val_idxs = rand_idxs[8000:] # choosing last 2000 as validation set train_paths = paths_subset[train_idxs] val_paths = paths_subset[val_idxs] return train_paths, val_paths train_imgs_path, val_imgs_path = load_images(Hyperparameters._DATA_DIR) print(f"Number of Training Images: {len(train_imgs_path)}") print(f"Number of Validation Images: {len(val_imgs_path)}") def plot_sample_images(images_path): images_path = images_path[:16] fig, axes = plt.subplots(4, 4, figsize=(10, 10)) for ax, img_path in zip(axes.flatten(), images_path): ax.imshow(Image.open(img_path)) ax.axis("off") plt.suptitle("Sample Images") plt.tight_layout() plt.show() plot_sample_images(train_imgs_path) # ## Dataset Class # dataset class class ColorizationDataset(Dataset): def __init__(self, paths, split="train", size=256): self.SIZE = size if split == "train": self.transforms = transforms.Compose( [ transforms.Resize((self.SIZE, self.SIZE), Image.BICUBIC), transforms.RandomHorizontalFlip(), # A little data augmentation!
] ) elif split == "val": self.transforms = transforms.Resize((self.SIZE, self.SIZE), Image.BICUBIC) self.split = split self.size = self.SIZE self.paths = paths def __getitem__(self, idx): img = Image.open(self.paths[idx]).convert("RGB") img = self.transforms(img) img = np.array(img) img_lab = rgb2lab(img).astype("float32") # Converting RGB to L*a*b img_lab = transforms.ToTensor()(img_lab) L = img_lab[[0], ...] / 50.0 - 1.0 # Between -1 and 1 ab = img_lab[[1, 2], ...] / 110.0 # Between -1 and 1 return {"L": L, "ab": ab} def __len__(self): return len(self.paths) # A handy function to make our dataloaders def make_dataloaders(batch_size=16, n_workers=2, pin_memory=True, shuffle=False, **kwargs): dataset = ColorizationDataset(size=Hyperparameters._SIZE, **kwargs) dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=n_workers, pin_memory=pin_memory, shuffle=shuffle ) return dataloader # get train, val dataloaders def get_train_val_dataloaders(train_paths, val_paths, batch_size=64, shuffle=True): """This function returns the train and validation dataloaders.""" train_dataloader = make_dataloaders( batch_size=batch_size, paths=train_paths, split="train", shuffle=shuffle ) val_dataloader = make_dataloaders( batch_size=batch_size, paths=val_paths, split="val" ) return train_dataloader, val_dataloader train_loader, val_loader = get_train_val_dataloaders( train_paths=train_imgs_path, val_paths=val_imgs_path, batch_size=16, shuffle=True ) print(f"Train Data: {len(train_loader.dataset)} ({len(train_loader)} batches)") print(f"Validation Data: {len(val_loader.dataset)} ({len(val_loader)} batches)") for batch in train_loader: print(batch["L"].shape) print(batch["ab"].shape) break # ## U-Net # block for UNet class UnetBlock(nn.Module): """This is the block for the UNet. Args: nf (int): Number of filters. ni (int): Number of input channels. submodule (nn.Module): Submodule. input_c (int): Number of input channels. dropout (bool): Dropout. innermost (bool): Innermost. outermost (bool): Outermost. """ def __init__( self, nf, ni, submodule=None, input_c=None, dropout=False, innermost=False, outermost=False, ): super().__init__() self.outermost = outermost if input_c is None: input_c = nf downconv = nn.Conv2d( input_c, ni, kernel_size=4, stride=2, padding=1, bias=False ) downrelu = nn.LeakyReLU(0.2, True) downnorm = nn.BatchNorm2d(ni) uprelu = nn.ReLU(True) upnorm = nn.BatchNorm2d(nf) if outermost: upconv = nn.ConvTranspose2d(ni * 2, nf, kernel_size=4, stride=2, padding=1) down = [downconv] up = [uprelu, upconv, nn.Tanh()] model = down + [submodule] + up elif innermost: upconv = nn.ConvTranspose2d( ni, nf, kernel_size=4, stride=2, padding=1, bias=False ) down = [downrelu, downconv] up = [uprelu, upconv, upnorm] model = down + up else: upconv = nn.ConvTranspose2d( ni * 2, nf, kernel_size=4, stride=2, padding=1, bias=False ) down = [downrelu, downconv, downnorm] up = [uprelu, upconv, upnorm] if dropout: up += [nn.Dropout(0.5)] model = down + [submodule] + up self.model = nn.Sequential(*model) # forward def forward(self, x): # print(x.shape) if self.outermost: return self.model(x) else: return torch.cat([x, self.model(x)], 1) # class for UNet class Unet(nn.Module): """This is the UNet class. Args: input_c (int): Number of input channels. output_c (int): Number of output channels. n_down (int): Number of down samples. num_filters (int): Number of filters.
""" def __init__(self, input_c=1, output_c=2, n_down=8, num_filters=64): super().__init__() unet_block = UnetBlock(num_filters * 8, num_filters * 8, innermost=True) for _ in range(n_down - 5): unet_block = UnetBlock( num_filters * 8, num_filters * 8, submodule=unet_block, dropout=True ) out_filters = num_filters * 8 for _ in range(3): unet_block = UnetBlock(out_filters // 2, out_filters, submodule=unet_block) out_filters //= 2 self.model = UnetBlock( output_c, out_filters, input_c=input_c, submodule=unet_block, outermost=True ) def forward(self, x): return self.model(x) # ## Discriminator # discriminator class class PatchDiscriminator(nn.Module): """This is the discriminator class. Args: input_c (int): Number of input channels. num_filters (int): Number of filters. n_down (int): Number of down samples. """ def __init__(self, input_c, num_filters=64, n_down=3): super().__init__() model = [self.get_layers(input_c, num_filters, norm=False)] model += [ self.get_layers( num_filters * 2**i, num_filters * 2 ** (i + 1), s=1 if i == (n_down - 1) else 2, ) for i in range(n_down) ] # the 'if' statement is taking care of not using # stride of 2 for the last block in this loop # Make sure to not use normalization or model += [ self.get_layers(num_filters * 2**n_down, 1, s=1, norm=False, act=False) ] # activation for the last layer of the model self.model = nn.Sequential(*model) # when needing to make some repetitive blocks of layers, def get_layers(self, ni, nf, k=4, s=2, p=1, norm=True, act=True): # it's always helpful to make a separate method for that purpose layers = [nn.Conv2d(ni, nf, k, s, p, bias=not norm)] if norm: layers += [nn.BatchNorm2d(nf)] if act: layers += [nn.LeakyReLU(0.2, True)] return nn.Sequential(*layers) def forward(self, x): return self.model(x) # ## GAN Loss # gan loss class GANLoss(nn.Module): """This is the GAN loss. Args: gan_mode (str): GAN mode. real_label (float): Real label. fake_label (float): Fake label. """ def __init__(self, gan_mode="vanilla", real_label=1.0, fake_label=0.0): super().__init__() self.register_buffer("real_label", torch.tensor(real_label)) self.register_buffer("fake_label", torch.tensor(fake_label)) if gan_mode == "vanilla": self.loss = nn.BCEWithLogitsLoss() elif gan_mode == "lsgan": self.loss = nn.MSELoss() def get_labels(self, preds, target_is_real): if target_is_real: labels = self.real_label else: labels = self.fake_label return labels.expand_as(preds) def __call__(self, preds, target_is_real): labels = self.get_labels(preds, target_is_real) loss = self.loss(preds, labels) return loss # ## Model Initialization # weights initialization def init_weights(net, init="norm", gain=0.02): def init_func(m): classname = m.__class__.__name__ if hasattr(m, "weight") and "Conv" in classname: if init == "norm": nn.init.normal_(m.weight.data, mean=0.0, std=gain) elif init == "xavier": nn.init.xavier_normal_(m.weight.data, gain=gain) elif init == "kaiming": nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in") if hasattr(m, "bias") and m.bias is not None: nn.init.constant_(m.bias.data, 0.0) elif "BatchNorm2d" in classname: nn.init.normal_(m.weight.data, 1.0, gain) nn.init.constant_(m.bias.data, 0.0) net.apply(init_func) print(f"model initialized with {init} initialization") return net # model initialization def init_model(model, device): model = model.to(device) model = init_weights(model) return model # # Main Model # main model class MainModel(nn.Module): """This is the main model class. Args: net_G (nn.Module): Generator network. 
lr_G (float): Learning rate for the generator. lr_D (float): Learning rate for the discriminator. beta1 (float): Beta1 for Adam optimizer. beta2 (float): Beta2 for Adam optimizer. lambda_L1 (float): Weight for L1 loss. """ def __init__( self, net_G=None, lr_G=2e-4, lr_D=2e-4, beta1=0.5, beta2=0.999, lambda_L1=100.0 ): super().__init__() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.lambda_L1 = lambda_L1 if net_G is None: self.net_G = init_model( Unet(input_c=1, output_c=2, n_down=8, num_filters=64), self.device ) else: self.net_G = net_G.to(self.device) self.net_D = init_model( PatchDiscriminator(input_c=3, n_down=3, num_filters=64), self.device ) self.GANcriterion = GANLoss(gan_mode="vanilla").to(self.device) self.L1criterion = nn.L1Loss() self.opt_G = optim.Adam(self.net_G.parameters(), lr=lr_G, betas=(beta1, beta2)) self.opt_D = optim.Adam(self.net_D.parameters(), lr=lr_D, betas=(beta1, beta2)) def set_requires_grad(self, model, requires_grad=True): for p in model.parameters(): p.requires_grad = requires_grad def setup_input(self, data): self.L = data["L"].to(self.device) self.ab = data["ab"].to(self.device) def forward(self): self.fake_color = self.net_G(self.L) def backward_D(self): fake_image = torch.cat([self.L, self.fake_color], dim=1) fake_preds = self.net_D(fake_image.detach()) self.loss_D_fake = self.GANcriterion(fake_preds, False) real_image = torch.cat([self.L, self.ab], dim=1) real_preds = self.net_D(real_image) self.loss_D_real = self.GANcriterion(real_preds, True) self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 self.loss_D.backward() def backward_G(self): fake_image = torch.cat([self.L, self.fake_color], dim=1) fake_preds = self.net_D(fake_image) self.loss_G_GAN = self.GANcriterion(fake_preds, True) self.loss_G_L1 = self.L1criterion(self.fake_color, self.ab) * self.lambda_L1 self.loss_G = self.loss_G_GAN + self.loss_G_L1 self.loss_G.backward() def optimize(self): self.forward() self.net_D.train() self.set_requires_grad(self.net_D, True) self.opt_D.zero_grad() self.backward_D() self.opt_D.step() self.net_G.train() self.set_requires_grad(self.net_D, False) self.opt_G.zero_grad() self.backward_G() self.opt_G.step() # ## Metrics # average meter class for loss class AverageMeter: """This class is used to keep track of loss and other metrics.""" def __init__(self): self.reset() def reset(self): self.count, self.avg, self.sum = [0.0] * 3 def update(self, val, count=1): self.count += count self.sum += count * val self.avg = self.sum / self.count # create loss meters def create_loss_meters(): loss_D_fake = AverageMeter() loss_D_real = AverageMeter() loss_D = AverageMeter() loss_G_GAN = AverageMeter() loss_G_L1 = AverageMeter() loss_G = AverageMeter() return { "loss_D_fake": loss_D_fake, "loss_D_real": loss_D_real, "loss_D": loss_D, "loss_G_GAN": loss_G_GAN, "loss_G_L1": loss_G_L1, "loss_G": loss_G, } # update loss meters in training def update_losses(model, loss_meter_dict, count): for loss_name, loss_meter in loss_meter_dict.items(): loss = getattr(model, loss_name) loss_meter.update(loss.item(), count=count) # plot losses def lab_to_rgb(L, ab): """ Takes a batch of images """ L = (L + 1.0) * 50.0 ab = ab * 110.0 Lab = torch.cat([L, ab], dim=1).permute(0, 2, 3, 1).cpu().numpy() rgb_imgs = [] for img in Lab: img_rgb = lab2rgb(img) rgb_imgs.append(img_rgb) return np.stack(rgb_imgs, axis=0) # visualize results def visualize(model, data, save=True): model.net_G.eval() with torch.no_grad(): model.setup_input(data) model.forward() 
    model.net_G.train()
    fake_color = model.fake_color.detach()
    real_color = model.ab
    L = model.L
    fake_imgs = lab_to_rgb(L, fake_color)
    real_imgs = lab_to_rgb(L, real_color)
    fig = plt.figure(figsize=(15, 8))
    for i in range(5):
        ax = plt.subplot(3, 5, i + 1)
        ax.imshow(L[i][0].cpu(), cmap="gray")
        ax.axis("off")
        ax = plt.subplot(3, 5, i + 1 + 5)
        ax.imshow(fake_imgs[i])
        ax.axis("off")
        ax = plt.subplot(3, 5, i + 1 + 10)
        ax.imshow(real_imgs[i])
        ax.axis("off")
    plt.tight_layout()
    plt.show()
    if save:
        fig.savefig(f"colorization_{time.time()}.png")


# print results
def log_results(loss_meter_dict):
    for loss_name, loss_meter in loss_meter_dict.items():
        print(f"{loss_name}: {loss_meter.avg:.5f}")
        if not os.path.exists("results.txt"):
            with open("results.txt", "w") as f:
                f.write(f"{loss_name}: {loss_meter.avg:.5f}\n")
        else:
            with open("results.txt", "a") as f:
                f.write(f"{loss_name}: {loss_meter.avg:.5f}\n")


# ## Train Model
def train_model(
    model, train_dl, val_dl, epochs, display_every=200, save_path="model.pt"
):
    # getting a fixed validation batch for visualizing the model output after
    # fixed intervals (named val_data so the training loop does not shadow it)
    val_data = next(iter(val_dl))
    best_loss = 1e10
    best_model = None
    gan_losses = []
    disc_losses = []
    for e in range(epochs):
        # function returning a dictionary of objects to
        # log the losses of the complete network
        loss_meter_dict = create_loss_meters()
        i = 0
        for data in tqdm(train_dl):
            model.setup_input(data)
            model.optimize()
            # function updating the log objects
            update_losses(model, loss_meter_dict, count=data["L"].size(0))
            i += 1
            if i % display_every == 0:
                print(f"\nEpoch {e+1}/{epochs}")
                print(f"Iteration {i}/{len(train_dl)}")
                # function to print out the losses
                log_results(loss_meter_dict)
                # function displaying the model's outputs on the fixed batch
                visualize(model, val_data, save=False)
        # save losses
        gan_losses.append(loss_meter_dict["loss_G_GAN"].avg)
        disc_losses.append(loss_meter_dict["loss_D"].avg)
        # save the model after an epoch if the generator loss improved
        if loss_meter_dict["loss_G"].avg < best_loss:
            best_loss = loss_meter_dict["loss_G"].avg
            best_model = model
            torch.save(best_model, save_path)
    return best_model, (gan_losses, disc_losses)


# ### Training
model = MainModel()
trained_model, losses = train_model(
    model,
    train_loader,
    val_loader,
    epochs=Hyperparameters._N_EPOCHS,
    save_path=Hyperparameters._SAVE_PATH,
)

from fastai.vision.learner import create_body
from torchvision.models.resnet import resnet18
from fastai.vision.models.unet import DynamicUnet

# a module-level device is needed here: build_res_unet and pretrain_generator
# both rely on it (it was previously commented out, causing a NameError)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def build_res_unet(n_input=1, n_output=2, size=256):
    body = create_body(resnet18, pretrained=True, n_in=n_input, cut=-2)
    net_G = DynamicUnet(body, n_output, (size, size)).to(device)
    return net_G


def pretrain_generator(net_G, train_dl, opt, criterion, epochs):
    for e in range(epochs):
        loss_meter = AverageMeter()
        for data in tqdm(train_dl):
            L, ab = data["L"].to(device), data["ab"].to(device)
            preds = net_G(L)
            loss = criterion(preds, ab)
            opt.zero_grad()
            loss.backward()
            opt.step()
            loss_meter.update(loss.item(), L.size(0))
        print(f"Epoch {e + 1}/{epochs}")
        print(f"L1 Loss: {loss_meter.avg:.5f}")


net_G = build_res_unet(n_input=1, n_output=2, size=256)
opt = optim.Adam(net_G.parameters(), lr=1e-4)
criterion = nn.L1Loss()
# pretrain_generator(net_G, train_loader, opt, criterion, 20)
# net_G = build_res_unet(n_input=1, n_output=2, size=256)
# net_G.load_state_dict(torch.load("res18-unet.pt", map_location=device))
# model = MainModel(net_G=net_G)
# train_model(model, train_loader, val_loader, 20)
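# ## Inference Sketch
# A minimal sketch (an addition, not from the original pipeline) of how the
# trained generator could colorize a single image. It assumes the full
# MainModel was saved with torch.save() as above and reuses lab_to_rgb;
# the input path in the usage comment is a placeholder.
def colorize_image(model, img_path, size=256):
    model.net_G.eval()
    img = Image.open(img_path).convert("RGB")
    img = transforms.Resize((size, size), Image.BICUBIC)(img)
    img_lab = rgb2lab(np.array(img)).astype("float32")
    img_lab = transforms.ToTensor()(img_lab)
    L = img_lab[[0], ...] / 50.0 - 1.0  # same scaling as ColorizationDataset
    # add a batch dimension and move to wherever the generator lives
    L = L.unsqueeze(0).to(next(model.net_G.parameters()).device)
    with torch.no_grad():
        fake_ab = model.net_G(L)
    # lab_to_rgb moves tensors to CPU internally
    return lab_to_rgb(L, fake_ab)[0]


# Hypothetical usage:
# model = torch.load(Hyperparameters._SAVE_PATH, map_location="cpu")
# plt.imshow(colorize_image(model, val_imgs_path[0]))
# plt.axis("off")
# plt.show()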
# # Comment # I have high expectations for the following notebook (Thank you very much for sharing such a simple yet powerful notebook!). # [DePlot - Trying Out](https://www.kaggle.com/code/gomaki/deplot-trying-out/notebook) # If I can use DePlot, I will be able to achieve accuracy without additional training for many graphs and focus on analyzing few specific graphs. # This notebook is designed to verify the expected score without additional training. # I have been trying to find a way to use this notebook without an internet connection so that I can submit it, but I have been struggling with the "Downloading (…)solve/main/Arial.TTF" part for several days without a solution. # If anyone knows a solution, I would greatly appreciate it if you could share it with me. # # library install import os import random import numpy as np import pandas as pd from glob import glob from tqdm import tqdm import matplotlib.pyplot as plt import pickle import json import cv2 from PIL import Image from rapidfuzz.distance.Levenshtein import distance as levenshtein from sklearn.metrics import r2_score import io import torch from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor import re # # deplot def deplot_output2df(deplot_output): """ The raw output of deplot TITLE | Rural population (%) long-run with 2050 projections<0x0A>(OWID) in Greece<0x0A>Years | Rural population<0x0A>1940 | 47.38<0x0A>1960 | 43.68<0x0A>1980 | 30.28<0x0A>... """ deplot_output = deplot_output.replace("<0x0A>", "\n").replace(" | ", "\t") second_a_index = [m.start() for m in re.finditer("\t", deplot_output)][1] last_newline_index = deplot_output.rfind("\n", 0, second_a_index) title = deplot_output[:last_newline_index] table = deplot_output[last_newline_index + 1 :] # print(title) try: data = io.StringIO(table) df = pd.read_csv(data, sep="\t") except: x_list = [] y_list = [] for line in table.split(" \n "): data_list = line.split("\t") x_list.append(data_list[0]) y_list.append(data_list[-1]) df = pd.DataFrame() df[x_list[0]] = x_list[1:] df[y_list[0]] = y_list[1:] return df def deplot(image, model, processor, device): inputs = processor( images=image, text="Generate underlying data table of the figure below:", return_tensors="pt", ) # Move inputs to GPU inputs = {key: value.to(device) for key, value in inputs.items()} predictions = model.generate(**inputs, max_new_tokens=512) return processor.decode(predictions[0], skip_special_tokens=True) # # Competition Metric def sigmoid(x): return 2 - 2 / (1 + np.exp(-x)) def normalized_rmse(y_true, y_pred): # The argument to the sigmoid transform is equal to # rmse(y_true, y_pred) / rmse(y_true, np.mean(y_true)) return sigmoid((1 - r2_score(y_true, y_pred)) ** 0.5) def normalized_levenshtein_score(y_true, y_pred): total_distance = np.sum([levenshtein(yt, yp) for yt, yp in zip(y_true, y_pred)]) length_sum = np.sum([len(yt) for yt in y_true]) return sigmoid(total_distance / length_sum) def score_series(y_true, y_pred): if len(y_true) != len(y_pred): return 0.0 if isinstance(y_true[0], str): return normalized_levenshtein_score(y_true, y_pred) else: return normalized_rmse(y_true, y_pred) def benetech_score(ground_truth: pd.DataFrame, predictions: pd.DataFrame) -> float: """Evaluate predictions using the metric from the Benetech - Making Graphs Accessible. Parameters ---------- ground_truth: pd.DataFrame Has columns `[data_series, chart_type]` and an index `id`. Values in `data_series` should be either arrays of floats or arrays of strings. 
predictions: pd.DataFrame """ if not ground_truth.index.equals(predictions.index): raise ValueError( "Must have exactly one prediction for each ground-truth instance." ) if not ground_truth.columns.equals(predictions.columns): raise ValueError(f"Predictions must have columns: {ground_truth.columns}.") pairs = zip( ground_truth.itertuples(index=False), predictions.itertuples(index=False) ) scores = [] for (gt_series, gt_type), (pred_series, pred_type) in pairs: if gt_type != pred_type: # Check chart_type condition scores.append(0.0) else: # Score with RMSE or Levenshtein as appropriate scores.append(score_series(gt_series, pred_series)) return np.mean(scores) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = Pix2StructForConditionalGeneration.from_pretrained( "/kaggle/input/load-google-deplot-model/deplot" ).to(device) processor = Pix2StructProcessor.from_pretrained( "/kaggle/input/load-google-deplot-model/deplot" ) filepath_list = set( glob(f"/kaggle/input/benetech-making-graphs-accessible/train/annotations/*") ) id_list = [] chart_type_list = [] x_ans_list = [] y_ans_list = [] x_pred_list = [] y_pred_list = [] for filepath in tqdm(filepath_list): with open(filepath) as fp: json_data = json.load(fp) if json_data["source"] == "generated": continue id_ = filepath.split("/")[-1][:-5] chart_type = json_data["chart-type"] x_type = json_data["axes"]["x-axis"]["values-type"] y_type = json_data["axes"]["y-axis"]["values-type"] x = [] y = [] for data in json_data["data-series"]: if x_type == "numerical": x.append(str(data["x"])) else: x.append(data["x"]) if y_type == "numerical": y.append(str(data["y"])) else: y.append(data["y"]) id_list.append(id_) x_ans_list.append(";".join(x)) y_ans_list.append(";".join(y)) chart_type_list.append(chart_type) # pred by deplot filepath = f"/kaggle/input/benetech-making-graphs-accessible/train/images/{id_}.jpg" image = cv2.imread(filepath) deplot_output = deplot(image, model, processor, device) df = deplot_output2df(deplot_output) chart_columns = list(df) if chart_type != "horizontal_bar": x_pred_list.append(";".join([str(pred) for pred in df[chart_columns[0]]])) y_pred_list.append(";".join([str(pred) for pred in df[chart_columns[1]]])) else: x_pred_list.append(";".join([str(pred) for pred in df[chart_columns[1]][::-1]])) y_pred_list.append(";".join([str(pred) for pred in df[chart_columns[0]][::-1]])) x_df = pd.DataFrame() x_df["id"] = id_list x_df["id"] = x_df["id"] + "_x" x_df["data_series"] = x_ans_list x_df["chart_type"] = chart_type_list y_df = pd.DataFrame() y_df["id"] = id_list y_df["id"] = y_df["id"] + "_y" y_df["data_series"] = y_ans_list y_df["chart_type"] = chart_type_list ground_truth_df = pd.concat([x_df, y_df]).reset_index(drop=True) x_df = pd.DataFrame() x_df["id"] = id_list x_df["id"] = x_df["id"] + "_x" x_df["data_series"] = x_pred_list x_df["chart_type"] = chart_type_list y_df = pd.DataFrame() y_df["id"] = id_list y_df["id"] = y_df["id"] + "_y" y_df["data_series"] = y_pred_list y_df["chart_type"] = chart_type_list prediction_df = pd.concat([x_df, y_df]).reset_index(drop=True) score = benetech_score( ground_truth_df[["data_series", "chart_type"]], prediction_df[["data_series", "chart_type"]], ) print(f"all data: {score}") for chart_type in set(chart_type_list): score = benetech_score( ground_truth_df[ground_truth_df["chart_type"] == chart_type][ ["data_series", "chart_type"] ], prediction_df[prediction_df["chart_type"] == chart_type][ ["data_series", "chart_type"] ], ) print(f"{chart_type}: {score}") 
ground_truth_df.to_csv("ground_truth_df.csv", index=False) prediction_df.to_csv("deplot_prediction_df.csv", index=False) ground_truth_df prediction_df
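# # Parsing Sanity Check
# A small illustration (an addition, not part of the evaluation loop above)
# of how deplot_output2df handles the raw DePlot string; the values are made
# up to mirror the format shown in the function's docstring.
sample_output = (
    "TITLE | Rural population (%) in Greece<0x0A>"
    "Years | Rural population<0x0A>"
    "1940 | 47.38<0x0A>"
    "1960 | 43.68<0x0A>"
    "1980 | 30.28"
)
# The title line is stripped off and the remainder is parsed as a
# tab-separated table with columns "Years" and "Rural population"
display(deplot_output2df(sample_output))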
# Hello Fellow Kagglers,
# This notebook gives an analysis of the competition dataset and demonstrates the preprocessing of the dataset, giving the X/y data to use for training.
# The processing is as follows:
# 1) Select dominant hand based on the highest number of non-empty hand frames
# 2) Filter out all frames with missing dominant hand coordinates
# 3) Resize video to 256 frames
# This is a work in progress and updates will follow.
# The processed data could help with making a baseline.
# Soon, a training notebook will follow with the corresponding inference.
# Excited to continue working on sign language!
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sn
import tensorflow as tf
from tqdm.notebook import tqdm
from sklearn.model_selection import train_test_split, GroupShuffleSplit
import glob
import sys
import os
import math
import gc
import sklearn
import time
import json

# TQDM Progress Bar With Pandas Apply Function
tqdm.pandas()

# # Global Config
# If Notebook Is Run By Committing or In Interactive Mode For Development
IS_INTERACTIVE = os.environ["KAGGLE_KERNEL_RUN_TYPE"] == "Interactive"
# Describe Statistics Percentiles
PERCENTILES = [0.01, 0.05, 0.25, 0.50, 0.75, 0.95, 0.99, 0.999]
# Global Random Seed
SEED = 42
# Number of Frames to resize recording to
N_TARGET_FRAMES = 256
# Global debug flag, takes subset of train
DEBUG = False

# # Plot Config
# MatplotLib Global Settings
mpl.rcParams.update(mpl.rcParamsDefault)
mpl.rcParams["xtick.labelsize"] = 16
mpl.rcParams["ytick.labelsize"] = 16
mpl.rcParams["axes.labelsize"] = 18
mpl.rcParams["axes.titlesize"] = 24

# # Utils
# Prints Shape and Dtype For List Of Variables
def print_shape_dtype(l, names):
    for e, n in zip(l, names):
        print(f"{n} shape: {e.shape}, dtype: {e.dtype}")

# # Read Train
# Read Train DataFrame
if DEBUG:
    train = pd.read_csv("/kaggle/input/asl-fingerspelling/train.csv").head(5000)
else:
    train = pd.read_csv("/kaggle/input/asl-fingerspelling/train.csv")

# Number Of Train Samples
N_SAMPLES = len(train)
print(f"N_SAMPLES: {N_SAMPLES}")
display(train.info())
display(train.head())

# # Add File Path
# Get complete file path to file
def get_file_path(path):
    return f"/kaggle/input/asl-fingerspelling/{path}"

train["file_path"] = train["path"].apply(get_file_path)

# # Phrase Processing
# Split Phrase To Char Tuple
train["phrase_char"] = train["phrase"].apply(tuple)
# Character Length of Phrase
train["phrase_char_len"] = train["phrase_char"].apply(len)
# Maximum Input Length
MAX_PHRASE_LENGTH = train["phrase_char_len"].max()
print(f"MAX_PHRASE_LENGTH: {MAX_PHRASE_LENGTH}")
# Phrase Character Length Statistics
display(train["phrase_char_len"].describe(percentiles=PERCENTILES).to_frame().round(1))

# Character Length Occurrence
plt.figure(figsize=(15, 8))
plt.title("Character Length Occurrence of Phrases")
train["phrase_char_len"].value_counts().sort_index().plot(kind="bar")
plt.xlim(-0.50, train["phrase_char_len"].max() - 1.50)
plt.xlabel("Phrase Character Length")
plt.ylabel("Sample Count")
plt.grid(axis="y")
plt.show()

# # Find Unique Character
# Use Set to keep track of unique characters in phrases
UNIQUE_CHARACTERS = set()
for phrase in tqdm(train["phrase_char"]):
    for c in phrase:
        UNIQUE_CHARACTERS.add(c)

# Sorted Unique Character
UNIQUE_CHARACTERS = np.array(sorted(UNIQUE_CHARACTERS))
# Number of Unique Characters
N_UNIQUE_CHARACTERS = len(UNIQUE_CHARACTERS)
print(f"N_UNIQUE_CHARACTERS: {N_UNIQUE_CHARACTERS}")

# Read Character to Ordinal Encoding Mapping
with
open( "/kaggle/input/asl-fingerspelling/character_to_prediction_index.json" ) as json_file: CHAR2ORD = json.load(json_file) # Character to Ordinal Encoding Mapping display(pd.Series(CHAR2ORD).to_frame("Ordinal Encoding")) # Number of Unique Characters N_UNIQUE_CHARACTERS = len(CHAR2ORD) print(f"N_UNIQUE_CHARACTERS: {N_UNIQUE_CHARACTERS}") # # Example Parquet File # Read First Parquet File example_parquet_df = pd.read_parquet(train["file_path"][0]) # Each parquet file contains 1000 recordings print(f"# Unique Recording: {example_parquet_df.index.nunique()}") # Display DataFrame layout display(example_parquet_df.head()) # # Video Statistics # Number of parquet chunks to analyse N = 5 if IS_INTERACTIVE else 25 # Number of Unique Frames in Recording N_UNIQUE_FRAMES = [] UNIQUE_FILE_PATHS = pd.Series(train["file_path"].unique()) for idx, file_path in enumerate(tqdm(UNIQUE_FILE_PATHS.sample(N, random_state=SEED))): df = pd.read_parquet(file_path) for group, group_df in df.groupby("sequence_id"): N_UNIQUE_FRAMES.append(group_df["frame"].nunique()) # Convert to Numpy Array N_UNIQUE_FRAMES = np.array(N_UNIQUE_FRAMES) # Number of unique frames in each video display( pd.Series(N_UNIQUE_FRAMES) .describe(percentiles=PERCENTILES) .to_frame("Value") .astype(int) ) plt.figure(figsize=(15, 8)) plt.title("Number of Unique Frames", size=24) pd.Series(N_UNIQUE_FRAMES).plot(kind="hist", bins=128) plt.grid() xlim = math.ceil(plt.xlim()[1]) plt.xlim(0, xlim) plt.xticks(np.arange(0, xlim + 50, 50)) plt.show() # With N_TARGET_FRAMES = 256 ~85% will be below N_UNIQUE_FRAMES_WATERFALL = [] # Maximum Number of Unique Frames to use N_MAX_UNIQUE_FRAMES = 400 # Compute Percentage for n in tqdm(range(0, N_MAX_UNIQUE_FRAMES + 1)): N_UNIQUE_FRAMES_WATERFALL.append( sum(N_UNIQUE_FRAMES >= n) / len(N_UNIQUE_FRAMES) * 100 ) plt.figure(figsize=(18, 10)) plt.title("Waterfall Plot For Number Of Unique Frames") pd.Series(N_UNIQUE_FRAMES_WATERFALL).plot(kind="bar") plt.grid(axis="y") plt.xticks([1] + np.arange(5, N_MAX_UNIQUE_FRAMES + 5, 5).tolist(), size=8, rotation=45) plt.xlabel("Number of Unique Frames", size=16) plt.yticks(np.arange(0, 100 + 5, 5), [f"{i}%" for i in range(0, 100 + 5, 5)]) plt.ylim(0, 100) plt.ylabel("Percentage of Samples With At Least N Unique Frames", size=16) plt.show() # # Landmark Indices def get_idxs(df, words_pos, words_neg=[], ret_names=True): idxs = [] names = [] for col_idx, col in enumerate(df.columns): # Check if column name contains all words if all([w in col for w in words_pos]) and all( [w not in col for w in words_neg] ): idxs.append(col_idx) names.append(col) # Convert to Numpy arrays idxs = np.array(idxs) names = np.array(names) # Returns either both column indices and names if ret_names: return idxs, names # Or only columns indices else: return idxs # Landmark Indices for Left/Right hand without z axis in raw data LEFT_HAND_IDXS0, LEFT_HAND_NAMES0 = get_idxs(example_parquet_df, ["left_hand"], ["z"]) RIGHT_HAND_IDXS0, RIGHT_HAND_NAMES0 = get_idxs( example_parquet_df, ["right_hand"], ["z"] ) COLUMNS = np.concatenate((LEFT_HAND_NAMES0, RIGHT_HAND_NAMES0)) N_COLS0 = len(COLUMNS) # Only X/Y axes are used N_DIMS0 = 2 print(f"N_COLS0: {N_COLS0}") # Landmark Indices in subset of dataframe with only COLUMNS selected LEFT_HAND_IDXS = np.argwhere(np.isin(COLUMNS, LEFT_HAND_NAMES0)).squeeze() RIGHT_HAND_IDXS = np.argwhere(np.isin(COLUMNS, RIGHT_HAND_NAMES0)).squeeze() N_COLS = LEFT_HAND_IDXS.size # Only X/Y axes are used N_DIMS = 2 print(f"N_COLS: {N_COLS}") # Indices in processed data by axes with 
only dominant hand HAND_X_IDXS = np.array( [idx for idx, name in enumerate(LEFT_HAND_NAMES0) if "x" in name] ).squeeze() HAND_Y_IDXS = np.array( [idx for idx, name in enumerate(LEFT_HAND_NAMES0) if "y" in name] ).squeeze() # Names in processed data by axes HAND_X_NAMES = LEFT_HAND_NAMES0[HAND_X_IDXS] HAND_Y_NAMES = LEFT_HAND_NAMES0[HAND_Y_IDXS] # # Tensorflow Preprocess Layer """ Tensorflow layer to process data in TFLite Data needs to be processed in the model itself, so we can not use Python """ class PreprocessLayer(tf.keras.layers.Layer): def __init__(self): super(PreprocessLayer, self).__init__() self.normalisation_correction = tf.constant( # Add 0.50 to x coordinates of left hand (original right hand) and substract 0.50 of right hand (original left hand) [0.50 if "x" in name else 0.00 for name in LEFT_HAND_NAMES0], dtype=tf.float32, ) @tf.function( input_signature=(tf.TensorSpec(shape=[None, N_COLS0], dtype=tf.float32),), ) def call(self, data0): # Number of Frames in Video N_FRAMES0 = tf.shape(data0)[0] # Find dominant hand left_hand_sum = tf.math.reduce_sum( tf.where(tf.math.is_nan(tf.gather(data0, LEFT_HAND_IDXS, axis=1)), 0, 1) ) right_hand_sum = tf.math.reduce_sum( tf.where(tf.math.is_nan(tf.gather(data0, RIGHT_HAND_IDXS, axis=1)), 0, 1) ) left_dominant = left_hand_sum >= right_hand_sum # Count non NaN Hand values in each frame if left_dominant: frames_hands_non_nan_sum = tf.math.reduce_sum( tf.where( tf.math.is_nan(tf.gather(data0, LEFT_HAND_IDXS, axis=1)), 0, 1 ), axis=[1], ) else: frames_hands_non_nan_sum = tf.math.reduce_sum( tf.where( tf.math.is_nan(tf.gather(data0, RIGHT_HAND_IDXS, axis=1)), 0, 1 ), axis=[1], ) # Frames With Coordinates for hand non_empty_frames_idxs = tf.where(frames_hands_non_nan_sum > 0) non_empty_frames_idxs = tf.squeeze(non_empty_frames_idxs, axis=1) # Filter data on frames with coordinates for hand data = tf.gather(data0, non_empty_frames_idxs, axis=0) # Cast Indices in float32 to be compatible with Tensorflow Lite non_empty_frames_idxs = tf.cast(non_empty_frames_idxs, tf.float32) # Normalize to start with 0 non_empty_frames_idxs -= tf.reduce_min(non_empty_frames_idxs) # Number of Frames in Filtered Video N_FRAMES = tf.shape(data)[0] # Gather Relevant Landmark Columns if left_dominant: data = tf.gather(data, LEFT_HAND_IDXS, axis=1) else: data = tf.gather(data, RIGHT_HAND_IDXS, axis=1) data = self.normalisation_correction + ( (data - self.normalisation_correction) * tf.where(self.normalisation_correction != 0, -1.0, 1.0) ) # Fill NaN Values With 0 data = tf.where(tf.math.is_nan(data), 0.0, data) # Resize Video data = tf.image.resize( data[:, :, tf.newaxis], [N_TARGET_FRAMES, N_COLS], method=tf.image.ResizeMethod.BILINEAR, antialias=False, ) data = tf.squeeze(data, axis=[2]) # Resize Non Empty Frame Indices non_empty_frames_idxs = tf.image.resize( non_empty_frames_idxs[:, tf.newaxis, tf.newaxis], [N_TARGET_FRAMES, 1], method=tf.image.ResizeMethod.BILINEAR, antialias=False, ) non_empty_frames_idxs = tf.squeeze(non_empty_frames_idxs, axis=[1, 2]) return data, non_empty_frames_idxs preprocess_layer = PreprocessLayer() # # Create X/Y # Target Arrays Processed Input Videos X = np.zeros([N_SAMPLES, N_TARGET_FRAMES, N_COLS], dtype=np.float32) # Frame Indices NON_EMPTY_FRAME_IDXS = np.zeros([N_SAMPLES, N_TARGET_FRAMES], dtype=np.uint16) # Ordinally Encoded Target With value 59 for pad token y = np.full( shape=[N_SAMPLES, MAX_PHRASE_LENGTH], fill_value=N_UNIQUE_CHARACTERS, dtype=np.int8 ) # Train DataFrame indexed by sequence_id to convenientlyy lookup 
recording data
train_sequence_id = train.set_index("sequence_id")

# All Unique Parquet Files
UNIQUE_FILE_PATHS = pd.Series(train["file_path"].unique())
# Counter to keep track of sample
row = 0
# Fill Arrays
for idx, file_path in enumerate(tqdm(UNIQUE_FILE_PATHS)):
    df = pd.read_parquet(file_path)
    for group, group_df in df.groupby("sequence_id"):
        # Get Processed Frames and non empty frame indices
        data, non_empty_frames_idxs = preprocess_layer(group_df[COLUMNS].values)
        X[row] = data
        NON_EMPTY_FRAME_IDXS[row] = non_empty_frames_idxs
        # Add Target By Ordinally Encoding Characters
        phrase_char = train_sequence_id.loc[group, "phrase_char"]
        for col, char in enumerate(phrase_char):
            y[row, col] = CHAR2ORD.get(char)
        row += 1

# Example target, note the phrase is padded with the pad token 59
print(f"Example Target: {y[0]}")

# Save X/y
np.save("X.npy", X)
np.save("y.npy", y)
np.save("NON_EMPTY_FRAME_IDXS.npy", NON_EMPTY_FRAME_IDXS)

# Split Train/Validation grouped by participant id
splitter = GroupShuffleSplit(test_size=0.10, n_splits=2, random_state=SEED)
PARTICIPANT_IDS = train["participant_id"].values
train_idxs, val_idxs = next(splitter.split(X, y, groups=PARTICIPANT_IDS))

# Save Train
np.save("X_train.npy", X[train_idxs])
np.save("y_train.npy", y[train_idxs])
np.save("NON_EMPTY_FRAME_IDXS_TRAIN.npy", NON_EMPTY_FRAME_IDXS[train_idxs])
# Save Validation
np.save("X_val.npy", X[val_idxs])
np.save("y_val.npy", y[val_idxs])
np.save("NON_EMPTY_FRAME_IDXS_VAL.npy", NON_EMPTY_FRAME_IDXS[val_idxs])

# Verify Train/Val is correctly split by participant id
print(
    f"Participant ID Intersection Train/Val: {set(PARTICIPANT_IDS[train_idxs]).intersection(PARTICIPANT_IDS[val_idxs])}"
)
# Train/Val Sizes
print(f"# Train Samples: {len(train_idxs)}, # Val Samples: {len(val_idxs)}")

# # Coordinate Statistics
def get_left_right_hand_mean_std():
    # Dominant Hand Statistics
    LEFT_HANDS_MEAN_X = np.zeros([HAND_X_IDXS.size], dtype=np.float32)
    LEFT_HANDS_MEAN_Y = np.zeros([HAND_Y_IDXS.size], dtype=np.float32)
    LEFT_HANDS_STD_X = np.zeros([HAND_X_IDXS.size], dtype=np.float32)
    LEFT_HANDS_STD_Y = np.zeros([HAND_Y_IDXS.size], dtype=np.float32)

    fig, axes = plt.subplots(2, 1, figsize=(15, N_DIMS * 8))
    axes_x, axes_y = axes
    # Iterate over all landmarks
    for col, v in enumerate(
        tqdm(X.reshape([N_SAMPLES * N_TARGET_FRAMES, N_COLS]).T)
    ):
        # Remove zero values as they are NaN values
        v = v[np.nonzero(v)]
        # X
        if col < HAND_X_IDXS.size:
            pos = col
            LEFT_HANDS_MEAN_X[pos] = v.astype(np.float32).mean()
            LEFT_HANDS_STD_X[pos] = v.astype(np.float32).std()
            axes_x.boxplot(
                v, notch=False, showfliers=False, positions=[pos], whis=[5, 95]
            )
        # Y
        else:
            # The first Y column sits at index HAND_X_IDXS.size, so no extra -1
            # (the previous offset wrote the first Y statistic to index -1)
            pos = col - HAND_X_IDXS.size
            LEFT_HANDS_MEAN_Y[pos] = v.astype(np.float32).mean()
            LEFT_HANDS_STD_Y[pos] = v.astype(np.float32).std()
            axes_y.boxplot(
                v, notch=False, showfliers=False, positions=[pos], whis=[5, 95]
            )

    for ax, name in zip(axes, ["X", "Y"]):
        ax.set_title(f"Hands {name.upper()} Dimension", size=24)
        ax.tick_params(axis="x", labelsize=8)
        ax.set_ylim(0.0, 1.0)
        ax.grid(axis="y")
    plt.show()

    # Stack Axes
    LEFT_HANDS_MEAN = np.stack((LEFT_HANDS_MEAN_X, LEFT_HANDS_MEAN_Y)).T
    LEFT_HANDS_STD = np.stack((LEFT_HANDS_STD_X, LEFT_HANDS_STD_Y)).T

    return LEFT_HANDS_MEAN, LEFT_HANDS_STD


# Get Dominant Hand Mean/Standard Deviation
DOMINANT_HANDS_MEAN, DOMINANT_HANDS_STD = get_left_right_hand_mean_std()
# Save Mean/STD to normalize input in neural network model
np.save("DOMINANT_HANDS_MEAN.npy", DOMINANT_HANDS_MEAN)
np.save("DOMINANT_HANDS_STD.npy", DOMINANT_HANDS_STD)
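# # Loading The Processed Data
# A minimal sketch (an addition, not part of the preprocessing above) of how
# the saved arrays could be consumed for training. It assumes the 42 columns
# of X are ordered as [21 x-coordinates, 21 y-coordinates], matching
# HAND_X_IDXS / HAND_Y_IDXS; zeros mark padded or missing values and are
# left unnormalised.
X_train = np.load("X_train.npy")
y_train = np.load("y_train.npy")
# flatten [21, 2] mean/std arrays into per-column vectors [x means, y means]
COL_MEANS = np.load("DOMINANT_HANDS_MEAN.npy").T.reshape(-1).astype(np.float32)
COL_STDS = np.load("DOMINANT_HANDS_STD.npy").T.reshape(-1).astype(np.float32)


def normalise(x, y):
    # standardise only the non-zero (non-padding) values
    x = tf.where(x == 0.0, 0.0, (x - COL_MEANS) / COL_STDS)
    return tf.cast(x, tf.float32), y


train_ds = (
    tf.data.Dataset.from_tensor_slices((X_train, y_train))
    .map(normalise, num_parallel_calls=tf.data.AUTOTUNE)
    .shuffle(1024, seed=SEED)
    .batch(64)
    .prefetch(tf.data.AUTOTUNE)
)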
# packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import json # configs default_col_1 = "darkblue" default_col_2 = "darkgreen" # load JSON files with open( "../input/google-research-identify-contrails-reduce-global-warming/train_metadata.json", "r", ) as f: dict_train_meta = json.load(f) with open( "../input/google-research-identify-contrails-reduce-global-warming/validation_metadata.json", "r", ) as f: dict_valid_meta = json.load(f) # look at first entry (train) dict_train_meta[0] # convert to data frame df_train_meta = pd.DataFrame.from_dict(dict_train_meta) df_valid_meta = pd.DataFrame.from_dict(dict_valid_meta) # preview df_train_meta.head() # structure of data frame - train df_train_meta.info() # structure of data frame - valid df_valid_meta.info() # statistics for projection parameters - train # the entries only differ at the central meridian parameter: df_train_meta["extract"] = df_train_meta.projection_wkt.apply(lambda x: x[318:352]) df_train_meta.extract.value_counts() # statistics for projection parameters - valid df_valid_meta["extract"] = df_valid_meta.projection_wkt.apply(lambda x: x[318:352]) df_valid_meta.extract.value_counts() # numerical features features_num = ["row_min", "row_size", "col_min", "col_size", "timestamp"] # basic stats - training data df_train_meta[features_num].describe() # pairwise scatterplots - training data sns.pairplot( df_train_meta[features_num], plot_kws={"alpha": 0.2, "color": default_col_1}, diag_kws={"color": default_col_1}, ) plt.show() # basic stats - validation data df_valid_meta[features_num].describe() # pairwise scatterplots - validation data sns.pairplot( df_valid_meta[features_num], plot_kws={"alpha": 0.2, "color": default_col_2}, diag_kws={"color": default_col_2}, ) plt.show() # export df_train_meta.to_csv("df_train_meta.csv", index=False) df_valid_meta.to_csv("df_valid_meta.csv", index=False)
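# extract the central meridian as a number instead of relying on the fixed
# character slice above (a sketch; it assumes the WKT stores the value as
# PARAMETER["central_meridian", <number>], which may differ between files)
import re


def get_central_meridian(wkt):
    m = re.search(r'PARAMETER\["central_meridian",\s*(-?[0-9.]+)', wkt)
    return float(m.group(1)) if m else np.nan


df_train_meta["central_meridian"] = df_train_meta.projection_wkt.apply(
    get_central_meridian
)
df_train_meta.central_meridian.value_counts()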
import pandas as pd import numpy as np import matplotlib.pyplot as plt train_df = pd.read_csv("../input/asl-fingerspelling/train.csv") train_df train_df["file_id"].nunique() train_df.groupby(["file_id"])["sequence_id"].count().value_counts() data = pd.read_parquet(f"../input/asl-fingerspelling/{train_df.iloc[0]['path']}") data data.index.nunique() train_df.loc[train_df["file_id"] == 5414471] data.loc[1816796431] data.columns # (x, y, z) => 543 x 3 = 1629 + 1 for "frame" = 1630
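# a quick sketch (added for illustration) to confirm the 1630-column math
# programmatically instead of by hand; it assumes coordinate columns are
# named with an axis prefix such as "x_face_0"
coord_cols = [c for c in data.columns if c != "frame"]
print(len(coord_cols))  # expected: 543 landmarks x 3 axes = 1629
# count columns per axis prefix (x/y/z), 543 landmarks each
pd.Series([c.split("_")[0] for c in coord_cols]).value_counts()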
<jupyter_start><jupyter_text>Aviachipta narxini bashorat qilish (Airfare price prediction)
Kaggle dataset identifier: aviachipta-narxini-bashorat-qilish
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn  # the scikit-learn library
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, mean_squared_error

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("../input/aviachipta-narxini-bashorat-qilish/train_data.csv")
df2 = pd.read_csv("../input/aviachipta-narxini-bashorat-qilish/test_data.csv")
sample_solution = pd.read_csv(
    "../input/aviachipta-narxini-bashorat-qilish/sample_solution.csv"
)
print(df.head())
df.info()
df2.info()
df.drop(columns=["id", "flight"], axis=1, inplace=True)
df2.drop(columns=["id", "flight"], axis=1, inplace=True)
df
df.describe()
df.head()
df["class"].value_counts()
df2.describe()
df.hist(bins=30, figsize=(14, 10))
plt.show()
plt.figure(figsize=(16, 6))
sns.barplot(x=df["class"], y=df["price"])
plt.title("Airfare prices")
plt.xlabel("Class")
plt.ylabel("Ticket price")
plt.xticks(rotation=90)
plt.grid()
plt.show()
df.plot(
    kind="scatter",
    x="days_left",
    y="duration",
    alpha=0.4,
    s=df["price"] / 100,
    label="price",
    c="price",
    cmap="jet",
    colorbar=True,
    figsize=(10, 8),
)
plt.show()
df.corrwith(df["price"]).sort_values(ascending=False)
df.groupby("class").mean()
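# --- Editor's sketch, not part of the original notebook -----------------------
# The imports above set up ColumnTransformer / OneHotEncoder / regressors, but the
# notebook stops at EDA. A minimal way those pieces could be wired together and
# scored; the categorical columns are inferred from dtypes, which is an
# assumption about this dataset, not something the notebook confirms.
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error

cat_cols = df.select_dtypes(include="object").columns.tolist()  # inferred categoricals
num_cols = [c for c in df.columns if c not in cat_cols + ["price"]]

pre = ColumnTransformer(
    [
        ("num", StandardScaler(), num_cols),
        ("cat", OneHotEncoder(handle_unknown="ignore"), cat_cols),
    ]
)
pipe = Pipeline([("pre", pre), ("model", RandomForestRegressor(random_state=0))])

X_tr, X_va, y_tr, y_va = train_test_split(
    df.drop(columns="price"), df["price"], test_size=0.2, random_state=0
)
pipe.fit(X_tr, y_tr)
print("MAE:", mean_absolute_error(y_va, pipe.predict(X_va)))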
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/188/129188639.ipynb
aviachipta-narxini-bashorat-qilish
orifsarniyozov
[{"Id": 129188639, "ScriptId": 36598122, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8379662, "CreationDate": "05/11/2023 16:46:11", "VersionNumber": 3.0, "Title": "Aviachipta", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 89.0, "LinesInsertedFromPrevious": 60.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 29.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185013984, "KernelVersionId": 129188639, "SourceDatasetVersionId": 5264235}]
[{"Id": 5264235, "DatasetId": 3063904, "DatasourceVersionId": 5337154, "CreatorUserId": 8379662, "LicenseName": "Unknown", "CreationDate": "03/29/2023 17:54:37", "VersionNumber": 1.0, "Title": "Aviachipta narxini bashorat qilish", "Slug": "aviachipta-narxini-bashorat-qilish", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3063904, "CreatorUserId": 8379662, "OwnerUserId": 8379662.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5264235.0, "CurrentDatasourceVersionId": 5337154.0, "ForumId": 3126640, "Type": 2, "CreationDate": "03/29/2023 17:54:37", "LastActivityDate": "03/29/2023", "TotalViews": 63, "TotalDownloads": 1, "TotalVotes": 0, "TotalKernels": 2}]
[{"Id": 8379662, "UserName": "orifsarniyozov", "DisplayName": "Orif Sarniyozov", "RegisterDate": "09/18/2021", "PerformanceTier": 0}]
false
3
732
0
777
732
129166813
# # Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# # Import Datasets
adv = pd.read_csv("/kaggle/input/advertisement-dataset-csv/advertising.csv")
adv.head()
adv.shape
adv = adv.drop("Unnamed: 0", axis=1)
adv.head()
adv.isnull().sum()

# # EDA
cols = list(adv.columns)
cols
adv.info()
for x in cols:
    if adv[x].dtypes == "float64":
        plt.hist(adv[x])
        plt.xlabel(x)
        plt.ylabel("count")
        plt.show()

# # Dep & Indep Variables
x = adv.iloc[:, :-1]
x
y = adv.iloc[:, -1]
y

# # Standardization
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
xstd = sc.fit_transform(x)

# # Split
from sklearn.model_selection import train_test_split

xtrain, xtest, ytrain, ytest = train_test_split(
    xstd, y, test_size=0.30, random_state=50
)
xtrain.shape, xtest.shape, ytrain.shape, ytest.shape

# # Build Model using Decision Tree
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV

dec = DecisionTreeRegressor(random_state=50)
params = {"min_samples_leaf": [1, 2, 3, 4], "max_depth": [1, 2, 3]}
grid = GridSearchCV(
    estimator=dec, param_grid=params, cv=3, scoring="r2"  # R^2 scoring for this regression task
)
grid.fit(xtrain, ytrain)
grid.best_score_
grid.best_estimator_
grid.best_params_

# # Predict
prediction = grid.predict(xtest)

# # R2 Score
from sklearn.metrics import r2_score

r2_score(ytest, prediction)

# # RMSE Score
from sklearn.metrics import mean_squared_error

print(np.sqrt(mean_squared_error(ytest, prediction)))

# # Visualise the Tree
from sklearn import tree

tree.plot_tree(grid.best_estimator_, fontsize=8)
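# --- Editor's sketch, not part of the original notebook -----------------------
# GridSearchCV keeps the full grid of scores in cv_results_; a compact way to see
# how every (max_depth, min_samples_leaf) pair performed, rather than only
# best_params_. Uses the fitted `grid` from the cells above.
import pandas as pd

cv = pd.DataFrame(grid.cv_results_)
print(
    cv[
        ["param_max_depth", "param_min_samples_leaf", "mean_test_score", "std_test_score"]
    ]
    .sort_values("mean_test_score", ascending=False)
    .head()
)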
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/166/129166813.ipynb
null
null
[{"Id": 129166813, "ScriptId": 38400171, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13314142, "CreationDate": "05/11/2023 13:38:37", "VersionNumber": 2.0, "Title": "Advertisement Dataset - Decision Tree", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 105.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 105.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
573
0
573
573
129166165
<jupyter_start><jupyter_text>NbFull
Dataset of the properties of various Niobium thin films of varying thickness and preparation methods with their recorded critical temperatures and RRR (Residual Resistivity Ratio) where possible.
Kaggle dataset identifier: nbfull
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

nb_full_file_path = "/kaggle/input/nbfull/TrainingData1.csv"
nb_test_file_path = "/kaggle/input/nbfull/TestData1.csv"
X_nb_data = pd.read_csv(nb_full_file_path, index_col="Id")
X_nb_test_data = pd.read_csv(nb_test_file_path, index_col="Id")
X_nb_data.describe()
X_nb_data.columns
features = [
    "Thickness_nm",
    "RRR",
    "Deposition_rate_nm/s",
    "Substrate",
    "Substrate_temperature_K",
    "Gas",
    "Pressure_Torr_I",
    "Pressure_Torr_f",
    "Deposition_method",
    "Gettering",
    "Annealed",
    "Ta(C)",
    "Anneal_time_hours",
]
X_full = X_nb_data[features]
y = X_nb_data.Tc_K
X = X_full.copy()
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
    X_full, y, train_size=0.8, test_size=0.2, random_state=0
)
categorical_cols = [
    cname
    for cname in X_train_full.columns
    if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"
]
numerical_cols = [
    cname
    for cname in X_train_full.columns
    if X_train_full[cname].dtype in ["int64", "float64", "float32", "int32"]
]
print("categorical columns:", categorical_cols)
print("numerical columns:", numerical_cols)
# no real need for this since all columns have a cardinality below ten but,
# it is good practice as not all categorical columns may always be included.
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_nb_test_data[my_cols].copy()
y_test = X_nb_test_data.Tc_K
X_final = X_full.copy()
y_final = X_nb_data.Tc_K
X_train.head()
numerical_transformer = SimpleImputer(strategy="median")
categorical_transformer = Pipeline(
    steps=[
        ("imputer", SimpleImputer(strategy="most_frequent")),
        ("onehot", OneHotEncoder(handle_unknown="ignore", sparse=False)),
    ]
)
preprocessor = ColumnTransformer(
    transformers=[
        ("num", numerical_transformer, numerical_cols),
        ("cat", categorical_transformer, categorical_cols),
    ]
)


def get_score(n_estimators):
    mypipe = Pipeline(
        steps=[
            ("preprocessor", preprocessor),
            ("model", RandomForestRegressor(n_estimators=n_estimators, random_state=0)),
        ]
    )
    score = -1 * cross_val_score(
        mypipe, X_train, y_train, cv=5, scoring="neg_mean_absolute_error"
    )
    return score.mean()


results = {forest_size * 5: get_score(forest_size * 5) for forest_size in range(1, 60)}
# import matplotlib.pyplot as plt
# %matplotlib inline
# plt.plot(list(results.keys()), list(results.values()))
# plt.show()
# best_tree_size = min(results, key=results.get)
# print("optimal value for n_estimators:", best_tree_size)
x_values = list(results.keys())
y_values = list(results.values())
fig = go.Figure(data=go.Scatter(x=x_values, y=y_values))
fig.update_layout(xaxis_title="Number of Estimators", yaxis_title="Mean Absolute Error")
fig.show()
best_tree_size = min(results, key=results.get)
print("optimal value for n_estimators:", best_tree_size)
results_2 = {
    forest_size: get_score(forest_size)
    for forest_size in range(best_tree_size // 2, best_tree_size + 10)
}
# import matplotlib.pyplot as plt
# %matplotlib inline
# plt.plot(list(results_2.keys()), list(results_2.values()))
# plt.show()
# best_tree_size_1 = min(results_2, key=results_2.get)
# print("optimal value for n_estimators:", best_tree_size_1)
x_values1 = list(results_2.keys())
y_values1 = list(results_2.values())
fig1 = go.Figure(data=go.Scatter(x=x_values1, y=y_values1))
fig1.update_layout(
    xaxis_title="Number of Estimators", yaxis_title="Mean Absolute Error"
)
fig1.show()
best_tree_size_1 = min(results_2, key=results_2.get)
print("optimal value for n_estimators:", best_tree_size_1)
final_model = RandomForestRegressor(
    n_estimators=best_tree_size_1, max_features=14, random_state=0
)
my_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", final_model)])
my_pipeline.fit(X_final, y_final)
final_preds = my_pipeline.predict(X_test)
score = mean_absolute_error(y_test, final_preds)
print("MAE:", score)
output = pd.DataFrame({"Id": X_test.index, "Tc_K": final_preds})
output.to_csv("TcPrediction.csv", index=False)
zero_error_line = go.Scatter(
    x=[0, 10], y=[0, 10], line=dict(color="black", dash="dash"), showlegend=False
)
fig = px.scatter(
    x=y_test,
    y=final_preds,
)
fig.update_layout(
    xaxis_title="Experimental Tc(K) values", yaxis_title="Predicted Tc(K) values"
)
fig.add_trace(zero_error_line)
fig.show()
# The y axis is the predicted values and the x axis represents the actual values.
# The model is generally accurate apart from films with a Tc < 5 K.
# final_model.feature_importances_
# feature_names = [f"feature {i}" for i in range(X.shape[1])]
# print(feature_names)
feature_names = X_final.columns.tolist()
print(feature_names)
importances = final_model.feature_importances_
print(importances)
print(final_model)
print(X_final.shape[1])
print(X_test.shape[1])
print(X_final.columns)
print(X_test.columns)
print(len(importances))
print(len(feature_names))
for col in categorical_cols:
    unique_values = X_train_full[col].nunique()
    print(f"Column '{col}' has {unique_values} unique value(s)")

importances = np.zeros(len(feature_names))
preprocessor = my_pipeline.named_steps["preprocessor"]
numerical_cols_transformer = preprocessor.transformers_[0][1]
categorical_cols_transformer = preprocessor.transformers_[1][1]
# Get the feature importances for numerical columns
if hasattr(my_pipeline.named_steps["model"], "feature_importances_"):
    importances[: len(numerical_cols)] = my_pipeline.named_steps[
        "model"
    ].feature_importances_[: len(numerical_cols)]
# Get the feature importances for one-hot encoded categorical columns
if hasattr(
    categorical_cols_transformer.named_steps["onehot"], "get_feature_importances"
):
    encoded_cols_importances = categorical_cols_transformer.named_steps[
        "onehot"
    ].get_feature_importances()
    encoded_cols_indices = categorical_cols_transformer.transformers_[0][2]
    importances[encoded_cols_indices] += encoded_cols_importances

feature_scores = pd.Series(importances, index=feature_names).sort_values(
    ascending=False
)
import plotly.graph_objects as go

# Create a bar plot of feature scores
fig = go.Figure(data=[go.Bar(x=feature_scores.index, y=feature_scores.values)])
fig.update_layout(title="Feature Scores", xaxis_title="Features", yaxis_title="Score")
fig.show()
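# --- Editor's sketch, not part of the original notebook -----------------------
# The hand-rolled importance mapping above never fires its second branch
# (OneHotEncoder has no get_feature_importances method), so the one-hot columns
# are left at zero. ColumnTransformer.get_feature_names_out() pairs every
# transformed column with its importance directly; a sketch assuming
# scikit-learn >= 1.0 and the fitted `my_pipeline` from above.
fitted_pre = my_pipeline.named_steps["preprocessor"]
out_names = fitted_pre.get_feature_names_out()
raw_importances = my_pipeline.named_steps["model"].feature_importances_
scores = pd.Series(raw_importances, index=out_names).sort_values(ascending=False)
print(scores.head(10))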
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/166/129166165.ipynb
nbfull
imsp33ds
[{"Id": 129166165, "ScriptId": 34884557, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12854863, "CreationDate": "05/11/2023 13:33:07", "VersionNumber": 12.0, "Title": "NbModel", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 221.0, "LinesInsertedFromPrevious": 102.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 119.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184972462, "KernelVersionId": 129166165, "SourceDatasetVersionId": 5075289}]
[{"Id": 5075289, "DatasetId": 2880778, "DatasourceVersionId": 5146067, "CreatorUserId": 12854863, "LicenseName": "Unknown", "CreationDate": "02/27/2023 14:27:55", "VersionNumber": 5.0, "Title": "NbFull", "Slug": "nbfull", "Subtitle": "Niobium thin film critical temperatures", "Description": "Dataset of the properties of various Niobium thin films of varying thickness and preparation methods with there recorded critical temperatures and RRR (Residual Resistivity Ratio) where possible.", "VersionNotes": "Data Update 2023/02/27", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2880778, "CreatorUserId": 12854863, "OwnerUserId": 12854863.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5075289.0, "CurrentDatasourceVersionId": 5146067.0, "ForumId": 2917284, "Type": 2, "CreationDate": "02/08/2023 19:48:11", "LastActivityDate": "02/08/2023", "TotalViews": 50, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 12854863, "UserName": "imsp33ds", "DisplayName": "Mayank Mittal", "RegisterDate": "12/11/2022", "PerformanceTier": 0}]
false
0
2287
0
2346
2287
129166183
<jupyter_start><jupyter_text>Daily Climate time series data
### Content
This dataset is intended for developers who want to train weather-forecasting models for the Indian climate. It provides data from **1st January 2013** to **24th April 2017** in the city of Delhi, India. The 4 parameters here are **meantemp, humidity, wind_speed, meanpressure**.
Kaggle dataset identifier: daily-climate-time-series-data
<jupyter_code>import pandas as pd

df = pd.read_csv('daily-climate-time-series-data/DailyDelhiClimateTrain.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1462 entries, 0 to 1461
Data columns (total 5 columns):
 #   Column        Non-Null Count  Dtype
---  ------        --------------  -----
 0   date          1462 non-null   object
 1   meantemp      1462 non-null   float64
 2   humidity      1462 non-null   float64
 3   wind_speed    1462 non-null   float64
 4   meanpressure  1462 non-null   float64
dtypes: float64(4), object(1)
memory usage: 57.2+ KB
<jupyter_text>Examples:
{ "date": "2013-01-01 00:00:00", "meantemp": 10.0, "humidity": 84.5, "wind_speed": 0.0, "meanpressure": 1015.6666666667 }
{ "date": "2013-01-02 00:00:00", "meantemp": 7.4, "humidity": 92.0, "wind_speed": 2.98, "meanpressure": 1017.8 }
{ "date": "2013-01-03 00:00:00", "meantemp": 7.1666666667, "humidity": 87.0, "wind_speed": 4.6333333332999995, "meanpressure": 1018.6666666667 }
{ "date": "2013-01-04 00:00:00", "meantemp": 8.6666666667, "humidity": 71.3333333333, "wind_speed": 1.2333333333, "meanpressure": 1017.1666666667 }
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# !pip install pystan==2.19.1.1
# !pip install fbprophet
import pandas as pd
import matplotlib.pyplot as plt
from prophet import Prophet

# Load the data
df = pd.read_csv(
    "/kaggle/input/daily-climate-time-series-data/DailyDelhiClimateTrain.csv"
)
df.head()
df.date = pd.to_datetime(df.date)
df = df.rename(columns={"date": "ds", "meantemp": "y"})
plt.plot(df["ds"], df["y"])
plt.xlabel("Date")
plt.ylabel("Temperature (°C)")
plt.show()

# Create a Prophet object and fit it to the data
model = Prophet()
model.fit(df)

# Create a future dataframe
future = model.make_future_dataframe(periods=365)
future.tail()

# Make predictions for the future dates
forecast = model.predict(future)
forecast[["ds", "yhat", "yhat_lower", "yhat_upper"]].tail()

# Visualize the predictions
model.plot(forecast)
plt.xlabel("Date")
plt.ylabel("Temperature (°C)")
plt.show()

# ## Prophet with Additional Regressors
future = df[df.ds > "2016-09-01"]
train = df[df.ds <= "2016-09-01"]
model = Prophet()
model.add_regressor("humidity")
model.add_regressor("wind_speed")
model.add_regressor("meanpressure")
model.fit(train)
future.drop(columns="y", inplace=True)
future.tail()

# Make predictions for the future dates
forecast = model.predict(future)
forecast[["ds", "yhat", "yhat_lower", "yhat_upper"]].tail()

# Visualize the predictions
model.plot(forecast)
plt.xlabel("Date")
plt.ylabel("Temperature (°C)")
plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/166/129166183.ipynb
daily-climate-time-series-data
sumanthvrao
[{"Id": 129166183, "ScriptId": 38398853, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1534022, "CreationDate": "05/11/2023 13:33:15", "VersionNumber": 1.0, "Title": "2023 Prophet Time Series", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184972484, "KernelVersionId": 129166183, "SourceDatasetVersionId": 636393}]
[{"Id": 636393, "DatasetId": 312121, "DatasourceVersionId": 655612, "CreatorUserId": 2205906, "LicenseName": "CC0: Public Domain", "CreationDate": "08/23/2019 09:22:09", "VersionNumber": 3.0, "Title": "Daily Climate time series data", "Slug": "daily-climate-time-series-data", "Subtitle": "Daily climate data in the city of Delhi from 2013 to 2017", "Description": "### Content\n\nThe Dataset is fully dedicated for the developers who want to train the model on Weather Forecasting for Indian climate. This dataset provides data from **1st January 2013** to **24th April 2017** in the city of Delhi, India. The 4 parameters here are \n**meantemp, humidity, wind_speed, meanpressure**.\n### Acknowledgements\n\nThis dataset has been collected from Weather Undergroud API. Dataset ownership and credit goes to them.\n\n### Submission Deadline\n\nAssignment 4 must be submitted by October 19, 2019 (10:00 PM). Any kernel published after this deadline will be evaluated for only 50% of the total marks.\n\n### Inspiration\n\nThis dataset was developed as a part Assignment 4 of Data Analytics Course, 2019 at PES University, Bangalore.", "VersionNotes": "Fixed precipitation value", "TotalCompressedBytes": 85299.0, "TotalUncompressedBytes": 85299.0}]
[{"Id": 312121, "CreatorUserId": 2205906, "OwnerUserId": 2205906.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 636393.0, "CurrentDatasourceVersionId": 655612.0, "ForumId": 323618, "Type": 2, "CreationDate": "08/22/2019 00:35:09", "LastActivityDate": "08/22/2019", "TotalViews": 217656, "TotalDownloads": 34317, "TotalVotes": 284, "TotalKernels": 160}]
[{"Id": 2205906, "UserName": "sumanthvrao", "DisplayName": "sumanthvrao", "RegisterDate": "09/03/2018", "PerformanceTier": 0}]
[{"daily-climate-time-series-data/DailyDelhiClimateTrain.csv": {"column_names": "[\"date\", \"meantemp\", \"humidity\", \"wind_speed\", \"meanpressure\"]", "column_data_types": "{\"date\": \"object\", \"meantemp\": \"float64\", \"humidity\": \"float64\", \"wind_speed\": \"float64\", \"meanpressure\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1462 entries, 0 to 1461\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 date 1462 non-null object \n 1 meantemp 1462 non-null float64\n 2 humidity 1462 non-null float64\n 3 wind_speed 1462 non-null float64\n 4 meanpressure 1462 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 57.2+ KB\n", "summary": "{\"meantemp\": {\"count\": 1462.0, \"mean\": 25.495520655761762, \"std\": 7.348102725432476, \"min\": 6.0, \"25%\": 18.857142857142858, \"50%\": 27.714285714285715, \"75%\": 31.30580357142857, \"max\": 38.71428571428572}, \"humidity\": {\"count\": 1462.0, \"mean\": 60.77170158004638, \"std\": 16.769652268485306, \"min\": 13.428571428571429, \"25%\": 50.375, \"50%\": 62.625, \"75%\": 72.21875, \"max\": 100.0}, \"wind_speed\": {\"count\": 1462.0, \"mean\": 6.802208747447473, \"std\": 4.561602164272007, \"min\": 0.0, \"25%\": 3.475, \"50%\": 6.221666666666667, \"75%\": 9.238235294117647, \"max\": 42.22}, \"meanpressure\": {\"count\": 1462.0, \"mean\": 1011.1045475940377, \"std\": 180.2316683392096, \"min\": -3.0416666666666665, \"25%\": 1001.5803571428571, \"50%\": 1008.563492063492, \"75%\": 1014.9449013157895, \"max\": 7679.333333333333}}", "examples": "{\"date\":{\"0\":\"2013-01-01\",\"1\":\"2013-01-02\",\"2\":\"2013-01-03\",\"3\":\"2013-01-04\"},\"meantemp\":{\"0\":10.0,\"1\":7.4,\"2\":7.1666666667,\"3\":8.6666666667},\"humidity\":{\"0\":84.5,\"1\":92.0,\"2\":87.0,\"3\":71.3333333333},\"wind_speed\":{\"0\":0.0,\"1\":2.98,\"2\":4.6333333333,\"3\":1.2333333333},\"meanpressure\":{\"0\":1015.6666666667,\"1\":1017.8,\"2\":1018.6666666667,\"3\":1017.1666666667}}"}}]
true
1
<start_data_description><data_path>daily-climate-time-series-data/DailyDelhiClimateTrain.csv:
<column_names>
['date', 'meantemp', 'humidity', 'wind_speed', 'meanpressure']
<column_types>
{'date': 'object', 'meantemp': 'float64', 'humidity': 'float64', 'wind_speed': 'float64', 'meanpressure': 'float64'}
<dataframe_Summary>
{'meantemp': {'count': 1462.0, 'mean': 25.495520655761762, 'std': 7.348102725432476, 'min': 6.0, '25%': 18.857142857142858, '50%': 27.714285714285715, '75%': 31.30580357142857, 'max': 38.71428571428572}, 'humidity': {'count': 1462.0, 'mean': 60.77170158004638, 'std': 16.769652268485306, 'min': 13.428571428571429, '25%': 50.375, '50%': 62.625, '75%': 72.21875, 'max': 100.0}, 'wind_speed': {'count': 1462.0, 'mean': 6.802208747447473, 'std': 4.561602164272007, 'min': 0.0, '25%': 3.475, '50%': 6.221666666666667, '75%': 9.238235294117647, 'max': 42.22}, 'meanpressure': {'count': 1462.0, 'mean': 1011.1045475940377, 'std': 180.2316683392096, 'min': -3.0416666666666665, '25%': 1001.5803571428571, '50%': 1008.563492063492, '75%': 1014.9449013157895, 'max': 7679.333333333333}}
<dataframe_info>
RangeIndex: 1462 entries, 0 to 1461
Data columns (total 5 columns):
 #   Column        Non-Null Count  Dtype
---  ------        --------------  -----
 0   date          1462 non-null   object
 1   meantemp      1462 non-null   float64
 2   humidity      1462 non-null   float64
 3   wind_speed    1462 non-null   float64
 4   meanpressure  1462 non-null   float64
dtypes: float64(4), object(1)
memory usage: 57.2+ KB
<some_examples>
{'date': {'0': '2013-01-01', '1': '2013-01-02', '2': '2013-01-03', '3': '2013-01-04'}, 'meantemp': {'0': 10.0, '1': 7.4, '2': 7.1666666667, '3': 8.6666666667}, 'humidity': {'0': 84.5, '1': 92.0, '2': 87.0, '3': 71.3333333333}, 'wind_speed': {'0': 0.0, '1': 2.98, '2': 4.6333333333, '3': 1.2333333333}, 'meanpressure': {'0': 1015.6666666667, '1': 1017.8, '2': 1018.6666666667, '3': 1017.1666666667}}
<end_description>
671
0
1370
671
129166229
# # Model (Horon)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import librosa
import warnings
import lightgbm as lgb
from sklearn.decomposition import *
from sklearn.preprocessing import *
from imblearn.ensemble import *
from sklearn.metrics import *

pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")

train = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/train.csv")
additional = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/additional.csv")
train = pd.concat((train, additional), axis=0).reset_index(drop=True)

# Compute not only the MFCCs but also the mel spectrogram.
n_mfcc = 20
n_mels = 128
columns = ["music_mfcc_" + str(i) for i in range(n_mfcc)] + [
    "music_S_dB_" + str(j) for j in range(n_mels)
]
music_features = pd.DataFrame(columns=columns, dtype=np.float32)
NUM_FILES_MUSIC = 250  # number of files in the music folder
NUM_FILES_MUSIC_ADD = 100  # number of files in music_add
for i in tqdm(range(NUM_FILES_MUSIC + NUM_FILES_MUSIC_ADD)):
    if i < NUM_FILES_MUSIC:
        path = f"/kaggle/input/data-science-osaka-spring-2023/music/music/{i}.wav"
    else:
        path = (
            f"/kaggle/input/data-science-osaka-spring-2023/music_add/music_add/{i}.wav"
        )
    y, sr = librosa.load(path)
    # MFCC
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
    # Compute the mel spectrogram
    S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
    # Convert to the decibel scale
    S_dB = librosa.power_to_db(S, ref=np.max)
    # Take the mean
    mfcc_mean = mfcc.mean(axis=1)
    S_dB_mean = S_dB.mean(axis=1)
    music_features.loc[i, 0:n_mfcc] = mfcc_mean.astype(np.float32)
    music_features.loc[i, n_mfcc:] = S_dB_mean.astype(np.float32)

music_features["music"] = music_features.index

# Get the motion column names other than Time
motion_0 = pd.read_csv(
    "/kaggle/input/data-science-osaka-spring-2023/motion/motion/0.csv"
)
features = motion_0.columns.values[1:]

# Convert the motion data into features with librosa as well.
# This produces 20 * 20 = 400 columns, but run it anyway.
n_mfcc = 20
motion_features = pd.DataFrame()
for f in tqdm(features):
    motion_feat = pd.DataFrame(
        columns=[f"{f}_{n}" for n in range(n_mfcc)], dtype=np.float32
    )
    for i in range(NUM_FILES_MUSIC + NUM_FILES_MUSIC_ADD):
        if i < NUM_FILES_MUSIC:
            path = f"/kaggle/input/data-science-osaka-spring-2023/motion/motion/{i}.csv"
        else:
            path = f"/kaggle/input/data-science-osaka-spring-2023/motion_add/motion_add/{i}.csv"
        df_tmp = pd.read_csv(path)
        y = df_tmp[f].values.astype(float)
        sr = 60  # 60 Hz
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
        ceps = mfcc.mean(axis=1)
        motion_feat.loc[i, motion_feat.columns] = ceps
    motion_features = pd.concat([motion_features, motion_feat], axis=1)

motion_features["ID"] = motion_features.index

# Split into train and validation data
dat_train = train.iloc[:133]
dat_valid = train.iloc[133:]

# As-is there are no negative samples, so generate every ID x music combination
# and label the pairs that are not in the list as 0
df_train = pd.DataFrame(
    [(i, m) for i in dat_train.ID.values for m in dat_train.music],
    columns=["ID", "music"],
)
df_valid = pd.DataFrame(
    [(i, m) for i in dat_valid.ID.values for m in dat_valid.music],
    columns=["ID", "music"],
)
df_train_2 = df_train.copy()
df_valid_2 = df_valid.copy()
df_train_2 = df_train_2.merge(train, on=["ID", "music"], how="left")
df_train_2["Target"] = (df_train_2.genre.isnull() == False).astype(int)
df_valid_2 = df_valid_2.merge(dat_valid, on=["ID", "music"], how="left")
df_valid_2["Target"] = (df_valid_2.genre.isnull() == False).astype(int)
df_train_3 = df_train_2.copy()
df_valid_3 = df_valid_2.copy()
df_train_3 = df_train_3.drop(["genre"], axis=1)
df_valid_3 = df_valid_3.drop(["genre"], axis=1)
df_train_4 = df_train_3.copy()
df_valid_4 = df_valid_3.copy()
df_train_4 = df_train_4.merge(motion_features, on=["ID"], how="left")
df_train_4 = df_train_4.merge(music_features, on=["music"], how="left")
df_valid_4 = df_valid_4.merge(motion_features, on=["ID"], how="left")
df_valid_4 = df_valid_4.merge(music_features, on=["music"], how="left")

# Create X_train, X_valid, y_train, y_valid
col_dropped = ["ID", "music", "Target"]
X_train, X_valid = df_train_4.drop(col_dropped, axis=1), df_valid_4.drop(
    col_dropped, axis=1
)
y_train, y_valid = df_train_2["Target"], df_valid_2["Target"]
dtrain = lgb.Dataset(X_train, label=y_train)
dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)
num_round = 100
stopping_rounds = 30
verbose_eval = -1
# Already tuned with Optuna
params = {
    "boosting_type": "gbdt",
    "objective": "binary",
    "metric": "binary_logloss",
    "learning_rate": 0.01,
    "force_col_wise": True,
    "random_state": 1,
    "feature_pre_filter": False,
    "lambda_l1": 0.0,
    "lambda_l2": 0.0,
    "num_leaves": 31,
    "feature_fraction": 0.652,
    "bagging_fraction": 1.0,
    "bagging_freq": 0,
    "min_child_samples": 20,
    "num_iterations": 100,
    "early_stopping_round": None,
}
model = lgb.train(
    params,
    dtrain,
    valid_sets=[dvalid],
    num_boost_round=num_round,
    callbacks=[
        lgb.early_stopping(stopping_rounds=stopping_rounds, verbose=True),
        lgb.log_evaluation(verbose_eval),
    ],
)

# Prepare the test data in the same way and output the predictions
residual_musics = np.setdiff1d(np.arange(250), train.music.values)
X_test = pd.DataFrame(
    [(i, m) for i in np.arange(100, 250) for m in residual_musics],
    columns=["ID", "music"],
)
X_test = X_test.merge(motion_features, on=["ID"], how="left")
X_test = X_test.merge(music_features, on=["music"], how="left")

# Run the predictions
y_pred_test = model.predict(X_test.drop(["ID", "music"], axis=1))
X_test["music_pred"] = y_pred_test

# Write the top seven entries separated by spaces
submission_Horon = (
    X_test.groupby("ID")
    .apply(
        lambda x: pd.Series(
            {
                "music": " ".join(
                    x.sort_values("music_pred", ascending=False)
                    .iloc[:7]["music"]
                    .values.astype(str)
                )
            }
        )
    )
    .reset_index()
)
submission_Horon

# # Model (skmtnaa)
# Import the libraries
import warnings

warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.metrics import *
from sklearn.model_selection import *
from imblearn.ensemble import BalancedRandomForestClassifier
from tqdm.notebook import tqdm
import librosa

model = BalancedRandomForestClassifier(
    n_estimators=2500,
    criterion="entropy",
    max_features=0.7,
    min_samples_leaf=5,
    random_state=71,
    n_jobs=-1,
)
model.fit(X_train, y_train)
residual_musics = np.setdiff1d(np.arange(250), train.music.values)
X_test = pd.DataFrame(
    [(i, m) for i in np.arange(100, 250) for m in residual_musics],
    columns=["ID", "music"],
)
X_test = X_test.merge(motion_features, on=["ID"], how="left")
X_test = X_test.merge(music_features, on=["music"], how="left")
y_pred_test = model.predict_proba(X_test.drop(["ID", "music"], axis=1))[:, 1]
X_test["music_pred"] = y_pred_test
submission_skmtnaa = (
    X_test.groupby("ID")
    .apply(
        lambda x: pd.Series(
            {
                "music": " ".join(
                    x.sort_values("music_pred", ascending=False)
                    .iloc[:7]["music"]
                    .values.astype(str)
                )
            }
        )
    )
    .reset_index()
)
submission_skmtnaa

# # Model (Murase)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import librosa
import warnings
import lightgbm as lgb
from sklearn.decomposition import *
from sklearn.preprocessing import *
from imblearn.ensemble import *
from sklearn.metrics import *

pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")

# Load the train and additional data
train = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/train.csv")
additional = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/additional.csv")
train_add = pd.concat([train, additional], axis=0)

# Extract the motion IDs and music values that have ground truth
train_motion_id = train_add["ID"].unique().tolist()
train_music_id = train_add["music"].unique().tolist()


# Generate the music features
def extract_music_feature(NUM_FILES, MUSIC, path_):
    # Compute not only the MFCCs but also the mel spectrogram.
    n_mfcc, n_mels = 20, 128
    columns = ["music_mfcc_" + str(i) for i in range(n_mfcc)] + [
        "music_S_dB_" + str(j) for j in range(n_mels)
    ]
    music_features = pd.DataFrame(columns=columns, dtype=np.float32)
    for i, music in tqdm(zip(range(NUM_FILES), MUSIC)):
        path = path_ + f"/{music}.wav"
        y, sr = librosa.load(path)
        # MFCC
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
        # Compute the mel spectrogram
        S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
        # Convert to the decibel scale
        S_dB = librosa.power_to_db(S, ref=np.max)
        # Take the mean
        mfcc_mean = mfcc.mean(axis=1)
        S_dB_mean = S_dB.mean(axis=1)
        music_features.loc[i, 0:n_mfcc] = mfcc_mean.astype(np.float32)
        music_features.loc[i, n_mfcc:] = S_dB_mean.astype(np.float32)
    return music_features


# File information for music and music_add
NUM_FILES_MUSIC = 250
# NUM_FILES_MUSIC_ADD = 100
MUSIC = list(range(250))
# MUSIC_ADD = list(range(250, 350))
path_music = f"/kaggle/input/data-science-osaka-spring-2023/music/music"
# path_music_add = f"/kaggle/input/data-science-osaka-spring-2023/music_add/music_add"

# Feature extraction
music_features1 = extract_music_feature(NUM_FILES_MUSIC, MUSIC, path_music)
# music_features2 = extract_music_feature(NUM_FILES_MUSIC_ADD, MUSIC_ADD, path_music_add)

# Assign the IDs
music_features1["music"] = MUSIC
# music_features2["music"] = MUSIC_ADD

# Concatenate
# music_features = pd.concat([music_features1, music_features2], axis=0)
music_features = music_features1

# Get the motion column names other than Time
motion_0 = pd.read_csv(
    "/kaggle/input/data-science-osaka-spring-2023/motion/motion/0.csv"
)
features = motion_0.columns.values[1:]


# Extract the motion features
def extract_motion_feature(NUM_FILES, MOTION, path_):
    n_mfcc = 20
    motion_features = pd.DataFrame()
    for f in tqdm(features):
        motion_feat = pd.DataFrame(
            columns=[f"{f}_{n}" for n in range(n_mfcc)], dtype=np.float32
        )
        for i, motion in zip(range(NUM_FILES), MOTION):
            path = path_ + f"/{motion}.csv"
            df_tmp = pd.read_csv(path)
            y = df_tmp[f].values.astype(float)
            sr = 60  # 60 Hz
            mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
            ceps = mfcc.mean(axis=1)
            motion_feat.loc[i, motion_feat.columns] = ceps
        motion_features = pd.concat([motion_features, motion_feat], axis=1)
    return motion_features


# File information for motion and motion_add
NUM_FILES_MOTION = 250
# NUM_FILES_MOTION_ADD = 100
MOTION = list(range(250))
# MOTION_ADD = list(range(250, 350))
path_motion = f"/kaggle/input/data-science-osaka-spring-2023/motion/motion"
# path_motion_add = f"/kaggle/input/data-science-osaka-spring-2023/motion_add/motion_add"

# Feature extraction
motion_features1 = extract_motion_feature(NUM_FILES_MOTION, MOTION, path_motion)
# motion_features2 = extract_motion_feature(NUM_FILES_MOTION_ADD, MOTION_ADD, path_motion_add)

# Assign the IDs
motion_features1["ID"] = MOTION
# motion_features2["ID"] = MOTION_ADD

# Concatenate
# motion_features = pd.concat([motion_features1, motion_features2], axis=0)
motion_features = motion_features1

# Training data
motion_train = motion_features[motion_features["ID"].isin(train_motion_id)]
music_train = music_features[music_features["music"].isin(train_music_id)]

# Test data
motion_test = motion_features[~motion_features["ID"].isin(train_motion_id)]
music_test = music_features[~music_features["music"].isin(train_music_id)]

# Extract only the feature columns
X_mot_train = motion_train.drop("ID", axis=1)
X_mus_train = music_train.drop("music", axis=1)
X_mot_test = motion_test.drop("ID", axis=1)
X_mus_test = music_test.drop("music", axis=1)

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA


# Standardization
def scale(X):
    scaler = StandardScaler()
    X[X.columns] = scaler.fit_transform(X)
    return X


# Principal component analysis
def pca(X):
    X_s = scale(X)
    pca = PCA()
    X_pca = pca.fit_transform(X_s)
    return X_pca


# Kernel PCA
def kernel_pca(X, gamma=1):
    X_s = scale(X)
    kpca = KernelPCA(n_components=None, kernel="rbf", gamma=gamma)
    X_kpca = kpca.fit_transform(X_s)
    return X_kpca


# Explained variance ratio of the principal components
def contribute_rate(X_pca):
    contribution_rate = np.var(X_pca, axis=0)
    contribution_rate_sum = np.sum(np.var(X_pca, axis=0))
    pca_var_vec = contribution_rate / contribution_rate_sum
    return pca_var_vec


# Grid search over gamma
def gs_gamma(X, param_space, n=10):
    scores, params = [], []
    for gamma in param_space:
        X_kpca = kernel_pca(X, gamma=gamma)
        pca_var_vec = contribute_rate(X_kpca)
        score = pca_var_vec[:n].sum()
        scores.append(score)
        params.append(gamma)
    param_score = {k: round(v, 4) for k, v in zip(params, scores)}
    return param_score


# Cumulative explained variance ratio
def contribute_rate_sum(X_pca, n=10):
    pca_var_vec = contribute_rate(X_pca)
    return pca_var_vec[:n].sum()


# Run PCA on the test data
motion_pca = pca(X_mot_test)
music_pca = pca(X_mus_test)

# Data frames of the principal component scores
df_motion_pca = pd.DataFrame(
    motion_pca, columns=["PC{}".format(x + 1) for x in range(motion_pca.shape[1])]
)
df_music_pca = pd.DataFrame(
    music_pca, columns=["PC{}".format(x + 1) for x in range(music_pca.shape[1])]
)

# Use 20 dimensions
df_motion = df_motion_pca.iloc[:, :20]
df_music = df_music_pca.iloc[:, :20]

# Assign the IDs
df_motion["ID"] = motion_test["ID"].tolist()
df_music["music"] = music_test["music"].tolist()

# Use the id as the index
df_motion_id = df_motion.set_index("ID")
df_music_id = df_music.set_index("music")


def cos_similarity(df1, df2):
    df_dot = df1.dot(df2.T)
    # Compute the matrix norms
    df1_norm = pd.DataFrame(np.linalg.norm(df1.values, axis=1), index=df1.index)
    df2_norm = pd.DataFrame(np.linalg.norm(df2.values, axis=1), index=df2.index)
    # Transpose one of the norm matrices and take the product
    df_norm = df1_norm.dot(df2_norm.T)
    # Compute the cosine similarity
    df_cos = df_dot / df_norm
    return df_cos.reset_index()


def distance_similarity(df1, df2, q=2):
    # Store the distances between the data points
    all_distance = []
    array_df1 = df1.values
    array_df2 = df2.values
    # Compute the distance for every combination
    for value1 in array_df1:
        distances = []
        for value2 in array_df2:
            distance = np.linalg.norm(value1 - value2, ord=q)
            distances.append(distance)
        all_distance.append(distances)
    # Build the data frame
    all_distance_array = np.array(all_distance).T
    cols = [col for col in df1.index]
    df_distance = pd.DataFrame(all_distance_array, columns=cols)
    df_distance = df_distance.set_index(df2.index)
    return df_distance.reset_index()


# Compute the similarity between motion and music
df_cos = cos_similarity(df_music_id, df_motion_id)
df_distance = distance_similarity(df_motion_id, df_music_id)


# The seven predicted values per ID
def submit_pred7(df_cos, ascending=False):
    pred_musics = []
    IDs = list(range(100, 250))
    for ID in IDs:
        df_cos_sort = df_cos.sort_values(ID, ascending=ascending)
        pred_music7 = df_cos_sort["music"].tolist()[:7]
        pred_music7_str = " ".join(map(str, pred_music7))
        pred_musics.append(pred_music7_str)
    submission = pd.DataFrame({"ID": IDs, "music": pred_musics})
    return submission


# Create the submission files
submission1 = submit_pred7(df_cos)
submission_Murase = submit_pred7(df_distance, ascending=True)

# Output
submission_Murase.to_csv("submission_distance.csv", index=False)
submission_Murase

# # Ensemble
# Derive weights from each model's LB score and take the labels with the highest weighted totals as the predictions.
# Compute the weights
LB_HORON = 0.06733
LB_SKMTNAA = 0.04233
LB_MURASE = 0.05966
LB_SUM = LB_HORON + LB_SKMTNAA + LB_MURASE
W_HORON = LB_HORON / LB_SUM
W_SKMTNAA = LB_SKMTNAA / LB_SUM
W_MURASE = LB_MURASE / LB_SUM
print(W_HORON, W_SKMTNAA, W_MURASE)

NUM_MUSIC = 250
df_pred = pd.DataFrame(index=[], columns=range(NUM_MUSIC))
submissions = [submission_Horon, submission_skmtnaa, submission_Murase]
weights = [W_HORON, W_SKMTNAA, W_MURASE]
num_records = submission_Horon.shape[0]  # number of rows; the same for the other submissions
for i in range(num_records):
    labels_pred = np.zeros(NUM_MUSIC)
    for sub, w in zip(submissions, weights):
        record = sub.iloc[i, 1]  # take one row from the submission
        record_list = record.split(" ")  # split on spaces into a list
        record_list_int = [int(r) for r in record_list]  # convert the strings to integers
        for val in record_list_int:
            # add the weight at the indices corresponding to the values in record_list_int
            labels_pred[val] += w
    # turn labels_pred into a data frame and concatenate it to df_pred
    df_labels_pred = pd.DataFrame(labels_pred)
    df_pred = pd.concat([df_pred, df_labels_pred.T])  # df_labels_pred is 250 rows x 1 column, so transpose it

# the indices are all 0, so reset them
df_pred = df_pred.reset_index(drop=True)
df_pred

music_pred_list = []
for i in range(len(df_pred)):
    record = df_pred.iloc[i, :]  # extract one row
    record_sorted = record.sort_values(ascending=False)  # sort in descending order
    top_7 = record_sorted[0:7]  # take the first seven
    music_pred = ""  # the music numbers get appended here
    for t in top_7.index:
        music_pred += str(t) + " "
    music_pred = music_pred[:-1]  # remove the trailing space
    music_pred_list.append(music_pred)

df_submission = pd.DataFrame(
    {"ID": range(100, len(df_pred) + 100), "music": music_pred_list}
)
df_submission.to_csv("submission.csv", index=False)
df_submission
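# --- Editor's sketch, not part of the original notebook -----------------------
# The double loop above can be collapsed into a small helper that performs the
# same weighted vote directly on the submission frames; an equivalent, shorter
# formulation kept as a sketch (tie ordering may differ from the pandas sort).
import numpy as np


def weighted_vote(subs, ws, num_music=250, top_k=7):
    rows = []
    for i in range(subs[0].shape[0]):
        votes = np.zeros(num_music)
        for sub, w in zip(subs, ws):
            # each submission cell holds space-separated music indices
            for m in map(int, sub.iloc[i, 1].split()):
                votes[m] += w
        top = np.argsort(votes)[::-1][:top_k]
        rows.append(" ".join(map(str, top)))
    return rows


# should agree with music_pred_list up to tie ordering:
# weighted_vote(submissions, weights)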
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/166/129166229.ipynb
null
null
[{"Id": 129166229, "ScriptId": 38341872, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14366614, "CreationDate": "05/11/2023 13:33:29", "VersionNumber": 9.0, "Title": "DS_Osaka_Spring_2023_ensemble", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 529.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 528.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Model (Horon)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import librosa
import warnings
import lightgbm as lgb
from sklearn.decomposition import *
from sklearn.preprocessing import *
from imblearn.ensemble import *
from sklearn.metrics import *

pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/train.csv")
additional = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/additional.csv")
train = pd.concat((train, additional), axis=0).reset_index(drop=True)
# Compute not only the MFCCs but also the mel spectrogram.
n_mfcc = 20
n_mels = 128
columns = ["music_mfcc_" + str(i) for i in range(n_mfcc)] + [
    "music_S_dB_" + str(j) for j in range(n_mels)
]
music_features = pd.DataFrame(columns=columns, dtype=np.float32)
NUM_FILES_MUSIC = 250  # number of files in the music folder
NUM_FILES_MUSIC_ADD = 100  # number of files in the music_add folder
for i in tqdm(range(NUM_FILES_MUSIC + NUM_FILES_MUSIC_ADD)):
    if i < NUM_FILES_MUSIC:
        path = f"/kaggle/input/data-science-osaka-spring-2023/music/music/{i}.wav"
    else:
        path = (
            f"/kaggle/input/data-science-osaka-spring-2023/music_add/music_add/{i}.wav"
        )
    y, sr = librosa.load(path)
    # MFCC
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
    # Compute the mel spectrogram
    S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
    # Convert it to the decibel scale
    S_dB = librosa.power_to_db(S, ref=np.max)
    # Take the mean over time
    mfcc_mean = mfcc.mean(axis=1)
    S_dB_mean = S_dB.mean(axis=1)
    music_features.loc[i, 0:n_mfcc] = mfcc_mean.astype(np.float32)
    music_features.loc[i, n_mfcc:] = S_dB_mean.astype(np.float32)

music_features["music"] = music_features.index
# Get the motion column names other than Time
motion_0 = pd.read_csv(
    "/kaggle/input/data-science-osaka-spring-2023/motion/motion/0.csv"
)
features = motion_0.columns.values[1:]
# Turn the motion signals into features with librosa as well.
# This produces 20*20 = 400 columns, but run it for now.
n_mfcc = 20
motion_features = pd.DataFrame()
for f in tqdm(features):
    motion_feat = pd.DataFrame(
        columns=[f"{f}_{n}" for n in range(n_mfcc)], dtype=np.float32
    )
    for i in range(NUM_FILES_MUSIC + NUM_FILES_MUSIC_ADD):
        if i < NUM_FILES_MUSIC:
            path = f"/kaggle/input/data-science-osaka-spring-2023/motion/motion/{i}.csv"
        else:
            path = f"/kaggle/input/data-science-osaka-spring-2023/motion_add/motion_add/{i}.csv"
        df_tmp = pd.read_csv(path)
        y = df_tmp[f].values.astype(float)
        sr = 60  # 60Hz
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
        ceps = mfcc.mean(axis=1)
        motion_feat.loc[i, motion_feat.columns] = ceps
    motion_features = pd.concat([motion_features, motion_feat], axis=1)

motion_features["ID"] = motion_features.index
# Split into train and validation data
dat_train = train.iloc[:133]
dat_valid = train.iloc[133:]
# As-is there are no negative samples, so generate every ID x music combination
# and label the pairs that are not in the list as 0.
df_train = pd.DataFrame(
    [(i, m) for i in dat_train.ID.values for m in dat_train.music],
    columns=["ID", "music"],
)
df_valid = pd.DataFrame(
    [(i, m) for i in dat_valid.ID.values for m in dat_valid.music],
    columns=["ID", "music"],
)
df_train_2 = df_train.copy()
df_valid_2 = df_valid.copy()
df_train_2 = df_train_2.merge(train, on=["ID", "music"], how="left")
df_train_2["Target"] = (df_train_2.genre.isnull() == False).astype(int)
df_valid_2 = df_valid_2.merge(dat_valid, on=["ID", "music"], how="left")
df_valid_2["Target"] = (df_valid_2.genre.isnull() == False).astype(int)
df_train_3 = df_train_2.copy()
df_valid_3 = df_valid_2.copy()
df_train_3 = df_train_3.drop(["genre"], axis=1)
df_valid_3 = df_valid_3.drop(["genre"], axis=1)
df_train_4 = df_train_3.copy()
df_valid_4 = df_valid_3.copy()
df_train_4 = df_train_4.merge(motion_features, on=["ID"], how="left")
df_train_4 = df_train_4.merge(music_features, on=["music"], how="left")
df_valid_4 = df_valid_4.merge(motion_features, on=["ID"], how="left")
df_valid_4 = df_valid_4.merge(music_features, on=["music"], how="left")
# Build X_train, X_valid, y_train, y_valid
col_dropped = ["ID", "music", "Target"]
X_train, X_valid = df_train_4.drop(col_dropped, axis=1), df_valid_4.drop(
    col_dropped, axis=1
)
y_train, y_valid = df_train_2["Target"], df_valid_2["Target"]
dtrain = lgb.Dataset(X_train, label=y_train)
dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)
num_round = 100
stopping_rounds = 30
verbose_eval = -1
# Tuned with Optuna
params = {
    "boosting_type": "gbdt",
    "objective": "binary",
    "metric": "binary_logloss",
    "learning_rate": 0.01,
    "force_col_wise": True,
    "random_state": 1,
    "feature_pre_filter": False,
    "lambda_l1": 0.0,
    "lambda_l2": 0.0,
    "num_leaves": 31,
    "feature_fraction": 0.652,
    "bagging_fraction": 1.0,
    "bagging_freq": 0,
    "min_child_samples": 20,
    "num_iterations": 100,
    "early_stopping_round": None,
}
model = lgb.train(
    params,
    dtrain,
    valid_sets=[dvalid],
    num_boost_round=num_round,
    callbacks=[
        lgb.early_stopping(stopping_rounds=stopping_rounds, verbose=True),
        lgb.log_evaluation(verbose_eval),
    ],
)
# Prepare the test data the same way and output the predictions
residual_musics = np.setdiff1d(np.arange(250), train.music.values)
X_test = pd.DataFrame(
    [(i, m) for i in np.arange(100, 250) for m in residual_musics],
    columns=["ID", "music"],
)
X_test = X_test.merge(motion_features, on=["ID"], how="left")
X_test = X_test.merge(music_features, on=["music"], how="left")
# Run the predictions
y_pred_test = model.predict(X_test.drop(["ID", "music"], axis=1))
X_test["music_pred"] = y_pred_test
# Write the top 7 candidates separated by spaces
submission_Horon = (
    X_test.groupby("ID")
    .apply(
        lambda x: pd.Series(
            {
                "music": " ".join(
                    x.sort_values("music_pred", ascending=False)
                    .iloc[:7]["music"]
                    .values.astype(str)
                )
            }
        )
    )
    .reset_index()
)
submission_Horon
# # Model (skmtnaa)
# Import the libraries
import warnings

warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.metrics import *
from sklearn.model_selection import *
from imblearn.ensemble import BalancedRandomForestClassifier
from tqdm.notebook import tqdm
import librosa

model = BalancedRandomForestClassifier(
    n_estimators=2500,
    criterion="entropy",
    max_features=0.7,
    min_samples_leaf=5,
    random_state=71,
    n_jobs=-1,
)
model.fit(X_train, y_train)
residual_musics = np.setdiff1d(np.arange(250), train.music.values)
X_test = pd.DataFrame(
    [(i, m) for i in np.arange(100, 250) for m in residual_musics],
    columns=["ID", "music"],
)
X_test = X_test.merge(motion_features, on=["ID"], how="left")
X_test = X_test.merge(music_features, on=["music"], how="left")
y_pred_test = model.predict_proba(X_test.drop(["ID", "music"], axis=1))[:, 1]
X_test["music_pred"] = y_pred_test
submission_skmtnaa = (
    X_test.groupby("ID")
    .apply(
        lambda x: pd.Series(
            {
                "music": " ".join(
                    x.sort_values("music_pred", ascending=False)
                    .iloc[:7]["music"]
                    .values.astype(str)
                )
            }
        )
    )
    .reset_index()
)
submission_skmtnaa
# # Model (Murase)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import librosa
import warnings
import lightgbm as lgb
from sklearn.decomposition import *
from sklearn.preprocessing import *
from imblearn.ensemble import *
from sklearn.metrics import *

pd.set_option("display.max_columns", None)
warnings.filterwarnings("ignore")
# Load the train and additional data
train = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/train.csv")
additional = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/additional.csv")
train_add = pd.concat([train, additional], axis=0)
# Extract the motion IDs and music that have ground-truth labels
train_motion_id = train_add["ID"].unique().tolist()
train_music_id = train_add["music"].unique().tolist()


# Generate the music features
def extract_music_feature(NUM_FILES, MUSIC, path_):
    # Compute not only the MFCCs but also the mel spectrogram.
    n_mfcc, n_mels = 20, 128
    columns = ["music_mfcc_" + str(i) for i in range(n_mfcc)] + [
        "music_S_dB_" + str(j) for j in range(n_mels)
    ]
    music_features = pd.DataFrame(columns=columns, dtype=np.float32)
    for i, music in tqdm(zip(range(NUM_FILES), MUSIC)):
        path = path_ + f"/{music}.wav"
        y, sr = librosa.load(path)
        # MFCC
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
        # Compute the mel spectrogram
        S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
        # Convert it to the decibel scale
        S_dB = librosa.power_to_db(S, ref=np.max)
        # Take the mean over time
        mfcc_mean = mfcc.mean(axis=1)
        S_dB_mean = S_dB.mean(axis=1)
        music_features.loc[i, 0:n_mfcc] = mfcc_mean.astype(np.float32)
        music_features.loc[i, n_mfcc:] = S_dB_mean.astype(np.float32)
    return music_features


# File info for music and music_add
NUM_FILES_MUSIC = 250
# NUM_FILES_MUSIC_ADD = 100
MUSIC = list(range(250))
# MUSIC_ADD = list(range(250, 350))
path_music = f"/kaggle/input/data-science-osaka-spring-2023/music/music"
# path_music_add = f"/kaggle/input/data-science-osaka-spring-2023/music_add/music_add"
# Feature extraction
music_features1 = extract_music_feature(NUM_FILES_MUSIC, MUSIC, path_music)
# music_features2 = extract_music_feature(NUM_FILES_MUSIC_ADD, MUSIC_ADD, path_music_add)
# Assign IDs
music_features1["music"] = MUSIC
# music_features2["music"] = MUSIC_ADD
# Concatenate
# music_features = pd.concat([music_features1, music_features2], axis=0)
music_features = music_features1
# Get the motion column names other than Time
motion_0 = pd.read_csv(
    "/kaggle/input/data-science-osaka-spring-2023/motion/motion/0.csv"
)
features = motion_0.columns.values[1:]


# Extract the motion features
def extract_motion_feature(NUM_FILES, MOTION, path_):
    n_mfcc = 20
    motion_features = pd.DataFrame()
    for f in tqdm(features):
        motion_feat = pd.DataFrame(
            columns=[f"{f}_{n}" for n in range(n_mfcc)], dtype=np.float32
        )
        for i, motion in zip(range(NUM_FILES), MOTION):
            path = path_ + f"/{motion}.csv"
            df_tmp = pd.read_csv(path)
            y = df_tmp[f].values.astype(float)
            sr = 60  # 60Hz
            mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
            ceps = mfcc.mean(axis=1)
            motion_feat.loc[i, motion_feat.columns] = ceps
        motion_features = pd.concat([motion_features, motion_feat], axis=1)
    return motion_features


# File info for motion and motion_add
NUM_FILES_MOTION = 250
# NUM_FILES_MOTION_ADD = 100
MOTION = list(range(250))
# MOTION_ADD = list(range(250, 350))
path_motion = f"/kaggle/input/data-science-osaka-spring-2023/motion/motion"
# path_motion_add = f"/kaggle/input/data-science-osaka-spring-2023/motion_add/motion_add"
# Feature extraction
motion_features1 = extract_motion_feature(NUM_FILES_MOTION, MOTION, path_motion)
# motion_features2 = extract_motion_feature(NUM_FILES_MOTION_ADD, MOTION_ADD, path_motion_add)
# Assign IDs
motion_features1["ID"] = MOTION
# motion_features2["ID"] = MOTION_ADD
# Concatenate
# motion_features = pd.concat([motion_features1, motion_features2], axis=0)
motion_features = motion_features1
# Training data
motion_train = motion_features[motion_features["ID"].isin(train_motion_id)]
music_train = music_features[music_features["music"].isin(train_music_id)]
# Test data
motion_test = motion_features[~motion_features["ID"].isin(train_motion_id)]
music_test = music_features[~music_features["music"].isin(train_music_id)]
# Keep only the feature columns
X_mot_train = motion_train.drop("ID", axis=1)
X_mus_train = music_train.drop("music", axis=1)
X_mot_test = motion_test.drop("ID", axis=1)
X_mus_test = music_test.drop("music", axis=1)
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA


# Standardize
def scale(X):
    scaler = StandardScaler()
    X[X.columns] = scaler.fit_transform(X)
    return X


# Principal component analysis
def pca(X):
    X_s = scale(X)
    pca = PCA()
    X_pca = pca.fit_transform(X_s)
    return X_pca


# Kernel PCA
def kernel_pca(X, gamma=1):
    X_s = scale(X)
    kpca = KernelPCA(n_components=None, kernel="rbf", gamma=gamma)
    X_kpca = kpca.fit_transform(X_s)
    return X_kpca


# Contribution ratio (explained variance) of each principal component
def contribute_rate(X_pca):
    contribution_rate = np.var(X_pca, axis=0)
    contribution_rate_sum = np.sum(np.var(X_pca, axis=0))
    pca_var_vec = contribution_rate / contribution_rate_sum
    return pca_var_vec


# Grid search over gamma
def gs_gamma(X, param_space, n=10):
    scores, params = [], []
    for gamma in param_space:
        X_kpca = kernel_pca(X, gamma=gamma)
        pca_var_vec = contribute_rate(X_kpca)
        score = pca_var_vec[:n].sum()
        scores.append(score)
        params.append(gamma)
    param_score = {k: round(v, 4) for k, v in zip(params, scores)}
    return param_score


# Cumulative contribution ratio
def contribute_rate_sum(X_pca, n=10):
    pca_var_vec = contribute_rate(X_pca)
    return pca_var_vec[:n].sum()


# Run PCA on the test data
motion_pca = pca(X_mot_test)
music_pca = pca(X_mus_test)
# DataFrames of principal component scores
df_motion_pca = pd.DataFrame(
    motion_pca, columns=["PC{}".format(x + 1) for x in range(motion_pca.shape[1])]
)
df_music_pca = pd.DataFrame(
    music_pca, columns=["PC{}".format(x + 1) for x in range(music_pca.shape[1])]
)
# Use 20 dimensions
df_motion = df_motion_pca.iloc[:, :20]
df_music = df_music_pca.iloc[:, :20]
# Attach the IDs
df_motion["ID"] = motion_test["ID"].tolist()
df_music["music"] = music_test["music"].tolist()
# Set the id as the index
df_motion_id = df_motion.set_index("ID")
df_music_id = df_music.set_index("music")


def cos_similarity(df1, df2):
    df_dot = df1.dot(df2.T)
    # Compute the row norms
    df1_norm = pd.DataFrame(np.linalg.norm(df1.values, axis=1), index=df1.index)
    df2_norm = pd.DataFrame(np.linalg.norm(df2.values, axis=1), index=df2.index)
    # Transpose one of the norm vectors and take the product
    df_norm = df1_norm.dot(df2_norm.T)
    # Compute the cosine similarity
    df_cos = df_dot / df_norm
    return df_cos.reset_index()


def distance_similarity(df1, df2, q=2):
    # Store the distances between the data points
    all_distance = []
    array_df1 = df1.values
    array_df2 = df2.values
    # Compute the distance for every pair
    for value1 in array_df1:
        distances = []
        for value2 in array_df2:
            distance = np.linalg.norm(value1 - value2, ord=q)
            distances.append(distance)
        all_distance.append(distances)
    # Build the DataFrame
    all_distance_array = np.array(all_distance).T
    cols = [col for col in df1.index]
    df_distance = pd.DataFrame(all_distance_array, columns=cols)
    df_distance = df_distance.set_index(df2.index)
    return df_distance.reset_index()


# Compute the similarity between the motions and the music
df_cos = cos_similarity(df_music_id, df_motion_id)
df_distance = distance_similarity(df_motion_id, df_music_id)


# Seven predictions per ID
def submit_pred7(df_cos, ascending=False):
    pred_musics = []
    IDs = list(range(100, 250))
    for ID in IDs:
        df_cos_sort = df_cos.sort_values(ID, ascending=ascending)
        pred_music7 = df_cos_sort["music"].tolist()[:7]
        pred_music7_str = " ".join(map(str, pred_music7))
        pred_musics.append(pred_music7_str)
    submission = pd.DataFrame({"ID": IDs, "music": pred_musics})
    return submission


# Create the submission files
submission1 = submit_pred7(df_cos)
submission_Murase = submit_pred7(df_distance, ascending=True)
# Output
submission_Murase.to_csv("submission_distance.csv", index=False)
submission_Murase
# # Ensemble
# Compute a weight from each model's LB score, and take the labels with the
# highest weighted sums as the predicted labels.
# Compute the weights
LB_HORON = 0.06733
LB_SKMTNAA = 0.04233
LB_MURASE = 0.05966
LB_SUM = LB_HORON + LB_SKMTNAA + LB_MURASE
W_HORON = LB_HORON / LB_SUM
W_SKMTNAA = LB_SKMTNAA / LB_SUM
W_MURASE = LB_MURASE / LB_SUM
print(W_HORON, W_SKMTNAA, W_MURASE)
NUM_MUSIC = 250
df_pred = pd.DataFrame(index=[], columns=range(NUM_MUSIC))
submissions = [submission_Horon, submission_skmtnaa, submission_Murase]
weights = [W_HORON, W_SKMTNAA, W_MURASE]
num_records = submission_Horon.shape[0]  # Row count; the same for every submission.
for i in range(num_records):
    labels_pred = np.zeros(NUM_MUSIC)
    for sub, w in zip(submissions, weights):
        record = sub.iloc[i, 1]  # Take one row from the submission
        record_list = record.split(" ")  # Split on spaces into a list
        record_list_int = [int(r) for r in record_list]  # Convert the strings to integers
        for val in record_list_int:
            # Add the weight at the index of each value in record_list_int
            labels_pred[val] += w
    # Turn labels_pred into a DataFrame and append it to df_pred
    df_labels_pred = pd.DataFrame(labels_pred)
    df_pred = pd.concat([df_pred, df_labels_pred.T])  # df_labels_pred is 250 rows x 1 column, so transpose it

# Every index is 0, so reset them
df_pred = df_pred.reset_index(drop=True)
df_pred
music_pred_list = []
for i in range(len(df_pred)):
    record = df_pred.iloc[i, :]  # Extract one record
    record_sorted = record.sort_values(ascending=False)  # Sort in descending order
    top_7 = record_sorted[0:7]  # Take the first 7
    music_pred = ""  # Append the music numbers to this string
    for t in top_7.index:
        music_pred += str(t) + " "
    music_pred = music_pred[:-1]  # Remove the trailing space
    music_pred_list.append(music_pred)

df_submission = pd.DataFrame(
    {"ID": range(100, len(df_pred) + 100), "music": music_pred_list}
)
df_submission.to_csv("submission.csv", index=False)
df_submission
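# The ensemble above turns each model's leaderboard score into a voting weight
# and keeps the seven labels with the highest weighted sums. As a minimal,
# self-contained sketch of that weighted top-k vote (the toy submissions and
# weights below are hypothetical, not the competition data):
import numpy as np
import pandas as pd


def weighted_topk_vote(submissions, weights, num_labels, k=7):
    # Add each model's weight onto every label it predicted, then keep the k best.
    out = []
    for i in range(len(submissions[0])):
        scores = np.zeros(num_labels)
        for sub, w in zip(submissions, weights):
            for label in map(int, sub.iloc[i, 1].split()):
                scores[label] += w
        top = np.argsort(scores)[::-1][:k]
        out.append(" ".join(map(str, top)))
    return out


# toy example: two models agree on labels 2 and 3, so those rank first
sub_a = pd.DataFrame({"ID": [0], "music": ["1 2 3"]})
sub_b = pd.DataFrame({"ID": [0], "music": ["2 3 4"]})
print(weighted_topk_vote([sub_a, sub_b], [0.6, 0.4], num_labels=10, k=3))  # ['3 2 1']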
false
0
6,752
0
6,752
6,752
129085342
import pandas as pd
import warnings
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

warnings.filterwarnings("ignore")
credit_df = pd.read_csv("/kaggle/input/crditcard/creditcard.csv")
credit_df.head()

# %% [code]
# replacing the class variable
credit_df["Class"] = credit_df["Class"].replace(0, -1)
analysis_df = credit_df.drop_duplicates()
## For standard Scaler
analysis_df_2 = analysis_df.copy()
analysis_df.head()

# %% [code]
print(f" There are {len(analysis_df[analysis_df['Class'] == -1])} normal transactions")
print(f" There are {len(analysis_df[analysis_df['Class'] == 1])} fraud transactions")

df_scaled = analysis_df[["Time", "Amount"]]
# Initializing the scaler
scaler = MinMaxScaler()
# scaling the data
df_scaled[["Time", "Amount"]] = scaler.fit_transform(df_scaled[["Time", "Amount"]])
## Replace the original columns
analysis_df[["Time", "Amount"]] = df_scaled[["Time", "Amount"]]
analysis_df.head()

X = analysis_df.drop("Class", axis=1)
y = analysis_df["Class"]
# Split the data into training and testing sets, stratifying on the class variable
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

smote = SMOTE(random_state=42)
# resample the training data only, oversampling the minority fraud cases
X_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)

rfc = RandomForestClassifier(n_estimators=100, random_state=42)
rfc.fit(X_train_resampled, y_train_resampled)
y_pred = rfc.predict(X_test)
print(f"Accuracy : {accuracy_score(y_test, y_pred)}")
print(f"Precision: {precision_score(y_test, y_pred)}")
print(f"Recall: {recall_score(y_test, y_pred)}")

df_scaled = analysis_df_2[["Time", "Amount"]]
# Initializing the scaler
scaler = StandardScaler()
# scaling the data
df_scaled[["Time", "Amount"]] = scaler.fit_transform(df_scaled[["Time", "Amount"]])
## Replace the original columns
analysis_df_2[["Time", "Amount"]] = df_scaled[["Time", "Amount"]]
analysis_df_2.head()

X = analysis_df_2.drop("Class", axis=1)
y = analysis_df_2["Class"]
# Split the data into training and testing sets, stratifying on the class variable
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# Create a SMOTE object
smote = SMOTE(random_state=42)
X_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)

rfc_2 = RandomForestClassifier(n_estimators=100, random_state=42)
rfc_2.fit(X_train_resampled, y_train_resampled)
y_pred = rfc_2.predict(X_test)
print(f"Accuracy : {accuracy_score(y_test, y_pred)}")
print(f"Precision: {precision_score(y_test, y_pred)}")
print(f"Recall: {recall_score(y_test, y_pred)}")

import numpy as np  # linear algebra
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import plotly.offline as py

py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.express as px
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly import tools
import plotly.figure_factory as ff

sns.set_style("whitegrid")
plt.style.use("fivethirtyeight")
from sklearn.linear_model import (
    LogisticRegression,
    PassiveAggressiveClassifier,
    RidgeClassifier,
)
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier
from sklearn.discriminant_analysis import (
    LinearDiscriminantAnalysis,
    QuadraticDiscriminantAnalysis,
)
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import NuSVC, LinearSVC
from sklearn.ensemble import (
    ExtraTreesClassifier,
    AdaBoostClassifier,
    GradientBoostingClassifier,
    BaggingClassifier,
)
from sklearn.neural_network import MLPClassifier
import xgboost as xgb
from catboost import Pool, CatBoostClassifier, cv
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.decomposition import PCA
import random


def random_colors(number_of_colors):
    color = [
        "#" + "".join([random.choice("0123456789ABCDEF") for j in range(6)])
        for i in range(number_of_colors)
    ]
    return color


train = pd.read_csv("/kaggle/input/crditcard/creditcard.csv")
table = ff.create_table(train.head().round(1))
iplot(table, filename="jupyter-table1")
train.columns
train.shape
iplot(
    ff.create_table(train.dtypes.to_frame().reset_index().round(3)),
    filename="jupyter-table2",
)
train.isnull().sum()
msno.bar(train, color="r", figsize=(10, 8))
msno.matrix(train)
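# The MinMax/Standard scalers above are fit on the full frame before the split,
# which leaks test-set statistics into training. A hedged sketch of a leak-free
# variant that fits the scaler on the training split only (column names follow
# the creditcard.csv schema used above):
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler


def split_then_scale(frame, cols=("Time", "Amount"), target="Class"):
    X = frame.drop(target, axis=1).copy()
    y = frame[target]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )
    X_train, X_test = X_train.copy(), X_test.copy()
    scaler = MinMaxScaler().fit(X_train[list(cols)])  # learn the ranges on train only
    X_train[list(cols)] = scaler.transform(X_train[list(cols)])
    X_test[list(cols)] = scaler.transform(X_test[list(cols)])
    return X_train, X_test, y_train, y_test


# usage, e.g.: X_tr, X_te, y_tr, y_te = split_then_scale(analysis_df)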
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/085/129085342.ipynb
null
null
[{"Id": 129085342, "ScriptId": 38373084, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7936144, "CreationDate": "05/10/2023 22:19:31", "VersionNumber": 2.0, "Title": "notebook87afcedb39", "EvaluationDate": "05/10/2023", "IsChange": false, "TotalLines": 209.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 209.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,875
0
1,875
1,875
129085315
<jupyter_start><jupyter_text>The Movies Dataset ### Context These files contain metadata for all 45,000 movies listed in the Full MovieLens Dataset. The dataset consists of movies released on or before July 2017. Data points include cast, crew, plot keywords, budget, revenue, posters, release dates, languages, production companies, countries, TMDB vote counts and vote averages. This dataset also has files containing 26 million ratings from 270,000 users for all 45,000 movies. Ratings are on a scale of 1-5 and have been obtained from the official GroupLens website. ### Content This dataset consists of the following files: **movies_metadata.csv:** The main Movies Metadata file. Contains information on 45,000 movies featured in the Full MovieLens dataset. Features include posters, backdrops, budget, revenue, release dates, languages, production countries and companies. **keywords.csv:** Contains the movie plot keywords for our MovieLens movies. Available in the form of a stringified JSON Object. **credits.csv:** Consists of Cast and Crew Information for all our movies. Available in the form of a stringified JSON Object. **links.csv:** The file that contains the TMDB and IMDB IDs of all the movies featured in the Full MovieLens dataset. **links_small.csv:** Contains the TMDB and IMDB IDs of a small subset of 9,000 movies of the Full Dataset. **ratings_small.csv:** The subset of 100,000 ratings from 700 users on 9,000 movies. The Full MovieLens Dataset consisting of 26 million ratings and 750,000 tag applications from 270,000 users on all the 45,000 movies in this dataset can be accessed [here](https://grouplens.org/datasets/movielens/latest/) Kaggle dataset identifier: the-movies-dataset <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler # Kaggle directory dir = "/kaggle/input/the-movies-dataset/" # Load datasets ratings = pd.read_csv(dir + "ratings_small.csv") metadata = pd.read_csv(dir + "movies_metadata.csv", low_memory=False) metadata = metadata.rename(columns={"id": "movieId"}) metadata["movieId"] = pd.to_numeric(metadata["movieId"], errors="coerce") # Merge the two DataFrames on the 'movieId' column combined_data = pd.merge(ratings, metadata, on="movieId") # Filter users who have rated 20 or more movies user_rating_counts = combined_data.groupby("userId")["rating"].count() active_users = user_rating_counts[user_rating_counts >= 20].index df = combined_data[combined_data["userId"].isin(active_users)] df.columns import json import numpy as np import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime # Convert 'release_date' to datetime format combined_data["release_date"] = pd.to_datetime( combined_data["release_date"], errors="coerce" ) # Convert 'genres' from JSON format to a list of genres def parse_genres(genres_str): genres = json.loads(genres_str.replace("'", '"')) genres_list = [g["name"] for g in genres] return genres_list combined_data["genres"] = combined_data["genres"].apply(parse_genres) # Extract year from 'release_date' and create 'release_year' column combined_data["release_year"] = combined_data["release_date"].dt.year # Plot distribution of ratings plt.figure(figsize=(10, 5)) sns.histplot(combined_data["rating"], bins=20, kde=False) plt.title("Distribution of Ratings") plt.xlabel("Rating") plt.ylabel("Count") plt.show() # distribution of movie release years 
plt.figure(figsize=(10, 5))
sns.histplot(combined_data["release_year"], bins=np.arange(1900, 2023, 1))
plt.title("Distribution of Movie Release Years")
plt.xlabel("Release Year")
plt.ylabel("Count")
plt.show()

# number of movies for each genre
all_genres = np.concatenate(combined_data["genres"].values)
unique_genres, counts = np.unique(all_genres, return_counts=True)
genre_counts = pd.DataFrame({"genre": unique_genres, "count": counts}).sort_values(
    by="count", ascending=False
)
# distribution of genres
plt.figure(figsize=(10, 5))
sns.barplot(x="genre", y="count", data=genre_counts)
plt.title("Distribution of Genres")
plt.xlabel("Genre")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()

# Plot distribution of languages
plt.figure(figsize=(10, 5))
sns.countplot(data=combined_data, x="original_language")
plt.title("Distribution of Original Language")
plt.xlabel("Language")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()

from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

# cluster based on userid
num_vars = [
    "userId",
    "movieId",
    "rating",
    "budget",
    "popularity",
    "revenue",
    "runtime",
    "vote_average",
    "vote_count",
]
df = df[num_vars]
df = df.dropna()
df_std = StandardScaler().fit_transform(df)
pca = PCA(n_components=2)
df_pca = pca.fit_transform(df_std)
k = 8
kmeans = KMeans(n_clusters=k, n_init="auto", random_state=42)
kmeans.fit(df_pca)
df["cluster"] = kmeans.labels_
# labels = df['userId']
# data = np.array(df.drop('userId', axis = 1))
df

# adapted from: https://pythonprogramminglanguage.com/kmeans-elbow-method/
from scipy.spatial.distance import cdist

performance = []
# number of clusters to consider
K = range(1, 15)
# run kmeans for each value of K
for k in K:
    kmeanModel = KMeans(n_clusters=k, n_init="auto")
    kmeanModel.fit(df_std)
    performance.append(
        sum(np.min(cdist(df_std, kmeanModel.cluster_centers_, "euclidean"), axis=1))
        / df_std.shape[0]
    )

# Plot the elbow
plt.plot(K, performance, "bx-")
plt.xlabel("k")
plt.ylabel("Performance")
plt.title("The Elbow Method showing the optimal k")
plt.show()

import matplotlib.pyplot as plt
import seaborn as sns

df_std = pd.DataFrame(df_std, columns=num_vars)
# Add the PCA components to the DataFrame
df_std["PCA1"] = df_pca[:, 0]
df_std["PCA2"] = df_pca[:, 1]
# Add the cluster labels to the DataFrame
df_std["cluster"] = kmeans.labels_
plt.figure(figsize=(10, 7))
sns.scatterplot(x="PCA1", y="PCA2", hue="cluster", data=df_std, palette="viridis")
plt.title("PCA Components colored by cluster")
plt.show()
plt.figure(figsize=(10, 7))
sns.boxplot(x="cluster", y="rating", data=df_std)
plt.title("Rating distribution per cluster")
plt.show()
from mpl_toolkits.mplot3d import Axes3D

pca = PCA(n_components=3)
# Fit the 3-D PCA on the original standardized features only, not on the
# previously appended PCA/cluster columns
df_pca2 = pca.fit_transform(df_std[num_vars])
df_std["PCA1"] = df_pca2[:, 0]
df_std["PCA2"] = df_pca2[:, 1]
df_std["PCA3"] = df_pca2[:, 2]
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111, projection="3d")
scatter = ax.scatter(
    df_std["PCA1"],
    df_std["PCA2"],
    df_std["PCA3"],
    c=df_std["cluster"],
    cmap="viridis",
    alpha=0.5,
)
ax.set_xlabel("PCA1")
ax.set_ylabel("PCA2")
ax.set_zlabel("PCA3")
plt.title("3D view of data points")
plt.show()
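# The parse_genres helper earlier in this notebook swaps single quotes for double
# quotes before json.loads, which breaks on any genre value containing an
# apostrophe. A sketch of a sturdier drop-in that reads the stringified Python
# list in movies_metadata.csv directly with ast.literal_eval:
import ast


def parse_genres_safe(genres_str):
    # genres are stored as a stringified Python list of dicts, e.g. "[{'id': ..., 'name': ...}]"
    try:
        genres = ast.literal_eval(genres_str) if isinstance(genres_str, str) else []
    except (ValueError, SyntaxError):
        genres = []
    return [g["name"] for g in genres if isinstance(g, dict) and "name" in g]


print(parse_genres_safe("[{'id': 16, 'name': 'Animation'}]"))  # ['Animation']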
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/085/129085315.ipynb
the-movies-dataset
rounakbanik
[{"Id": 129085315, "ScriptId": 38157186, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8181938, "CreationDate": "05/10/2023 22:19:04", "VersionNumber": 2.0, "Title": "Project4", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 180.0, "LinesInsertedFromPrevious": 100.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 80.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184826513, "KernelVersionId": 129085315, "SourceDatasetVersionId": 6663}]
[{"Id": 6663, "DatasetId": 3405, "DatasourceVersionId": 6663, "CreatorUserId": 927562, "LicenseName": "CC0: Public Domain", "CreationDate": "11/10/2017 02:40:38", "VersionNumber": 7.0, "Title": "The Movies Dataset", "Slug": "the-movies-dataset", "Subtitle": "Metadata on over 45,000 movies. 26 million ratings from over 270,000 users.", "Description": "### Context\n\nThese files contain metadata for all 45,000 movies listed in the Full MovieLens Dataset. The dataset consists of movies released on or before July 2017. Data points include cast, crew, plot keywords, budget, revenue, posters, release dates, languages, production companies, countries, TMDB vote counts and vote averages.\n\nThis dataset also has files containing 26 million ratings from 270,000 users for all 45,000 movies. Ratings are on a scale of 1-5 and have been obtained from the official GroupLens website.\n\n\n### Content\n\nThis dataset consists of the following files:\n\n**movies_metadata.csv:** The main Movies Metadata file. Contains information on 45,000 movies featured in the Full MovieLens dataset. Features include posters, backdrops, budget, revenue, release dates, languages, production countries and companies.\n\n**keywords.csv:** Contains the movie plot keywords for our MovieLens movies. Available in the form of a stringified JSON Object.\n\n**credits.csv:** Consists of Cast and Crew Information for all our movies. Available in the form of a stringified JSON Object.\n\n**links.csv:** The file that contains the TMDB and IMDB IDs of all the movies featured in the Full MovieLens dataset.\n\n**links_small.csv:** Contains the TMDB and IMDB IDs of a small subset of 9,000 movies of the Full Dataset.\n\n**ratings_small.csv:** The subset of 100,000 ratings from 700 users on 9,000 movies.\n\nThe Full MovieLens Dataset consisting of 26 million ratings and 750,000 tag applications from 270,000 users on all the 45,000 movies in this dataset can be accessed [here](https://grouplens.org/datasets/movielens/latest/) \n\n### Acknowledgements\n\nThis dataset is an ensemble of data collected from TMDB and GroupLens.\nThe Movie Details, Credits and Keywords have been collected from the TMDB Open API. This product uses the TMDb API but is not endorsed or certified by TMDb. Their API also provides access to data on many additional movies, actors and actresses, crew members, and TV shows. You can try it for yourself [here](https://www.themoviedb.org/documentation/api).\n\nThe Movie Links and Ratings have been obtained from the Official GroupLens website. The files are a part of the dataset available [here](https://grouplens.org/datasets/movielens/latest/)\n\n![](https://www.themoviedb.org/assets/static_cache/9b3f9c24d9fd5f297ae433eb33d93514/images/v4/logos/408x161-powered-by-rectangle-green.png)\n\n\n### Inspiration\n\nThis dataset was assembled as part of my second Capstone Project for Springboard's [Data Science Career Track](https://www.springboard.com/workshops/data-science-career-track). 
I wanted to perform an extensive EDA on Movie Data to narrate the history and the story of Cinema and use this metadata in combination with MovieLens ratings to build various types of Recommender Systems.\n\nBoth my notebooks are available as kernels with this dataset: [The Story of Film](https://www.kaggle.com/rounakbanik/the-story-of-film) and [Movie Recommender Systems](https://www.kaggle.com/rounakbanik/movie-recommender-systems)\n\nSome of the things you can do with this dataset:\nPredicting movie revenue and/or movie success based on a certain metric. What movies tend to get higher vote counts and vote averages on TMDB? Building Content Based and Collaborative Filtering Based Recommendation Engines.", "VersionNotes": "Add Full Ratings", "TotalCompressedBytes": 943755800.0, "TotalUncompressedBytes": 943755800.0}]
[{"Id": 3405, "CreatorUserId": 927562, "OwnerUserId": 927562.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6663.0, "CurrentDatasourceVersionId": 6663.0, "ForumId": 8425, "Type": 2, "CreationDate": "10/24/2017 18:53:43", "LastActivityDate": "02/06/2018", "TotalViews": 1496610, "TotalDownloads": 295134, "TotalVotes": 3284, "TotalKernels": 475}]
[{"Id": 927562, "UserName": "rounakbanik", "DisplayName": "Rounak Banik", "RegisterDate": "02/23/2017", "PerformanceTier": 2}]
false
0
1,699
0
2,162
1,699
129085714
# # Help functions import cv2 import numpy as np import os def load_data(base_path, batch_size=8): # Iterate over the three train directories (train/1, train/2, train/3) for fragment_id in range(1, 4): fragment_path = os.path.join(base_path, str(fragment_id)) print(f"Loading data from fragment: {fragment_id}") images, masks, inklabels = [], [], [] for i in range(65): # Load surface_volume images (00.tif to 64.tif) image_path = os.path.join(fragment_path, f"surface_volume/{i:02d}.tif") image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) images.append(image) # Load mask.png and inklabels.png only once per fragment if i == 0: mask = cv2.imread( os.path.join(fragment_path, "mask.png"), cv2.IMREAD_GRAYSCALE ) inklabel = cv2.imread( os.path.join(fragment_path, "inklabels.png"), cv2.IMREAD_GRAYSCALE ) # Append the same mask and inklabel for each image slice masks.append(mask) inklabels.append(inklabel) # Yield a batch of data if len(images) == batch_size: yield np.stack(images, axis=0), np.stack(masks, axis=0), np.stack( inklabels, axis=0 ) images, masks, inklabels = [], [], [] # Yield any remaining data if images: yield np.stack(images, axis=0), np.stack(masks, axis=0), np.stack( inklabels, axis=0 ) print("Data loading complete.") # Usage example: base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train" for images, masks, inklabels in load_data(base_path, batch_size=8): # Process the batch of data pass # # Dataset import torch from torch.utils.data import Dataset import albumentations as A from albumentations.pytorch import ToTensorV2 import cv2 import numpy as np np.__version__ = "1.23.0" import os from torch.utils.data import Dataset class VesuviusDataset(Dataset): def __init__(self, base_path, transforms=None): self.base_path = base_path self.transforms = transforms self.fragment_ids = [str(i) for i in range(1, 4)] self.slice_ids = [f"{i:02d}" for i in range(65)] def __len__(self): return len(self.fragment_ids) * len(self.slice_ids) def __getitem__(self, idx): fragment_idx = idx // len(self.slice_ids) slice_idx = idx % len(self.slice_ids) fragment_id = self.fragment_ids[fragment_idx] slice_id = self.slice_ids[slice_idx] fragment_path = os.path.join(self.base_path, fragment_id) image = cv2.imread( os.path.join(fragment_path, f"surface_volume/{slice_id}.tif"), cv2.IMREAD_GRAYSCALE, ) mask = cv2.imread(os.path.join(fragment_path, "mask.png"), cv2.IMREAD_GRAYSCALE) inklabel = cv2.imread( os.path.join(fragment_path, "inklabels.png"), cv2.IMREAD_GRAYSCALE ) if self.transforms: augmented = self.transforms(image=image, mask=mask) image = augmented["image"] mask = augmented["mask"] return image, mask, inklabel def get_transforms(): # Define data augmentation and preprocessing transforms return A.Compose( [ A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5), A.Normalize( mean=[0.485], std=[0.229] ), # Updated mean and std for grayscale images ToTensorV2(), ], p=1.0, ) # Usage example: base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train" transforms = ( get_transforms() ) # Define your data augmentation and preprocessing transforms dataset = VesuviusDataset(base_path, transforms=transforms) # Access data using indices image, mask, inklabel = dataset[0] # Get the first item from the dataset # # Model import torch.optim as optim from torch.cuda.amp import GradScaler, autocast from torch.utils.data import DataLoader from sklearn.model_selection import KFold import sys sys.path.append( "/kaggle/input/pretrainedmodels/pretrainedmodels-0.7.4/pretrainedmodels" ) sys.path.append( 
"/kaggle/input/segmentation-models-pytorch/segmentation_models.pytorch-master" ) sys.path.append( "/kaggle/input/efficientnet-pytorch/EfficientNet-PyTorch-master/efficientnet_pytorch" ) import pretrainedmodels import segmentation_models_pytorch as smp import efficientnet_pytorch NUM_FOLDS = 5 NUM_EPOCHS = 30 BATCH_SIZE = 8 DEVICE = "cuda" if torch.cuda.is_available() else "cpu" BACKBONE = "efficientnet-b4" LEARNING_RATE = 1e-4 def get_model(): model = smp.DeepLabV3Plus( encoder_name=BACKBONE, encoder_weights="imagenet", in_channels=1, classes=1, ) # Parallelize the model across multiple GPUs if torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) return model.to(DEVICE) def train_one_fold(fold, train_loader, val_loader): print(f"Training fold {fold}") model = get_model().to(DEVICE) criterion = nn.BCEWithLogitsLoss() optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE) scaler = GradScaler() # For mixed precision training for epoch in range(NUM_EPOCHS): print(f"Starting epoch {epoch}") # Training loop model.train() for batch in train_loader: images, masks = batch images, masks = images.to(DEVICE), masks.to(DEVICE) optimizer.zero_grad() with autocast(): # Mixed precision training outputs = model(images) loss = criterion(outputs, masks) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() # Validation loop model.eval() val_loss = 0 with torch.no_grad(): for batch in val_loader: images, masks = batch images, masks = images.to(DEVICE), masks.to(DEVICE) outputs = model(images) loss = criterion(outputs, masks) val_loss += loss.item() val_loss /= len(val_loader) print(f"Fold {fold}, Epoch {epoch}, Validation Loss: {val_loss}") return model def kfold_cross_validation(): # Load and preprocess data images, masks = load_data( "/kaggle/input/vesuvius-challenge-ink-detection/train" ) # Replace with the correct path to your data # Flatten the lists of fragments into a single list of image slices and masks images = [img_slice for fragment in images for img_slice in fragment] masks = [ mask_slice for fragment_masks in masks for mask_slice in fragment_masks if fragment_masks is not None ] dataset = VesuviusDataset(images, masks, transforms=get_transforms()) kfold = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42) models = [] for fold, (train_indices, val_indices) in enumerate(kfold.split(dataset)): train_subset = torch.utils.data.Subset(dataset, train_indices) val_subset = torch.utils.data.Subset(dataset, val_indices) train_loader = DataLoader( train_subset, batch_size=8, shuffle=True, num_workers=4 ) val_loader = DataLoader(val_subset, batch_size=8, shuffle=False, num_workers=4) model = train_one_fold(fold, train_loader, val_loader) models.append(model) return models def ensemble_models(models): print("Ensembling models") def ensemble(images): with torch.no_grad(): preds = [model(images) for model in models] avg_preds = sum(preds) / len(preds) return avg_preds return ensemble # # Main def kfold_cross_validation(): # Load and preprocess data train_base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train" # Create lists to store all images, masks, and inklabels all_images, all_masks, all_inklabels = [], [], [] # Iterate over the generator to get all batches of data for images, masks, inklabels in load_data(train_base_path): all_images.extend(images) all_masks.extend(masks) all_inklabels.extend(inklabels) # Convert lists to numpy arrays all_images = np.stack(all_images, axis=0) all_masks = np.stack(all_masks, axis=0) all_inklabels = 
np.stack(all_inklabels, axis=0)

    dataset = VesuviusDataset(all_images, all_masks, transforms=get_transforms())
    kfold = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42)
    models = []
    for fold, (train_indices, val_indices) in enumerate(kfold.split(dataset)):
        train_subset = torch.utils.data.Subset(dataset, train_indices)
        val_subset = torch.utils.data.Subset(dataset, val_indices)
        train_loader = DataLoader(
            train_subset, batch_size=8, shuffle=True, num_workers=4
        )
        val_loader = DataLoader(val_subset, batch_size=8, shuffle=False, num_workers=4)
        model = train_one_fold(fold, train_loader, val_loader)
        models.append(model)
    return models


import torch.nn as nn  # used by train_one_fold's BCEWithLogitsLoss but never imported above

# # Submission
# The prediction helpers are defined here, before the pipeline below calls them.
import pandas as pd


def predict(ensemble_fn, test_data_loader):
    predictions = []
    with torch.no_grad():
        for batch in test_data_loader:
            images = batch
            images = images.to(DEVICE)
            outputs = ensemble_fn(images)
            predictions.extend(outputs.cpu().numpy())
    return predictions


def create_submission_file(predictions):
    # Collect the rows in a list; DataFrame.append was removed in pandas 2.0
    rows = []
    for i, pred in enumerate(predictions):
        rle = rle_encoding(pred.squeeze())
        rows.append({"Id": str(i), "Predicted": " ".join(map(str, rle))})
    submission = pd.DataFrame(rows, columns=["Id", "Predicted"])
    submission.to_csv("submission.csv", index=False)


# K-Fold Cross Validation and training
models = kfold_cross_validation()
# Ensembling models
ensemble_fn = ensemble_models(models)
# Load test data
test_base_path = "/kaggle/input/vesuvius-challenge-ink-detection/test"
test_images = []
for fragment_id in range(1, 4):
    fragment_path = os.path.join(test_base_path, str(fragment_id))
    for i in range(65):
        image_path = os.path.join(fragment_path, f"surface_volume/{i:02d}.tif")
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        test_images.append(image)

test_data_loader = DataLoader(
    VesuviusDataset(test_images, None, get_transforms()),
    batch_size=BATCH_SIZE,
    shuffle=False,
)
# Predict and create submission file
predictions = predict(ensemble_fn, test_data_loader)
create_submission_file(predictions)
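# create_submission_file above calls rle_encoding, which is never defined in
# this notebook. A minimal sketch, assuming the common run-length-encoding
# convention of 1-indexed "start length" pairs for a binarized mask (the 0.5
# threshold is an assumption, not taken from the source):
import numpy as np


def rle_encoding(mask, threshold=0.5):
    # Flatten the prediction, binarize it, and emit alternating start/length values
    pixels = (np.asarray(mask).flatten() > threshold).astype(np.uint8)
    runs = []
    run_start = None
    for idx, value in enumerate(pixels, start=1):  # positions are 1-indexed
        if value and run_start is None:
            run_start = idx
        elif not value and run_start is not None:
            runs.extend([run_start, idx - run_start])
            run_start = None
    if run_start is not None:
        runs.extend([run_start, len(pixels) + 1 - run_start])
    return runs


print(rle_encoding(np.array([[0, 1, 1], [0, 0, 1]])))  # [2, 2, 6, 1]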
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/085/129085714.ipynb
null
null
[{"Id": 129085714, "ScriptId": 38373464, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8800417, "CreationDate": "05/10/2023 22:26:24", "VersionNumber": 2.0, "Title": "Vesuvius Challenge - Ink Detection", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 317.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 310.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
null
null
null
null
# # Help functions
import cv2
import numpy as np
import os


def load_data(base_path, batch_size=8):
    # Iterate over the three train directories (train/1, train/2, train/3)
    for fragment_id in range(1, 4):
        fragment_path = os.path.join(base_path, str(fragment_id))
        print(f"Loading data from fragment: {fragment_id}")
        images, masks, inklabels = [], [], []
        for i in range(65):
            # Load surface_volume images (00.tif to 64.tif)
            image_path = os.path.join(fragment_path, f"surface_volume/{i:02d}.tif")
            image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            images.append(image)
            # Load mask.png and inklabels.png only once per fragment
            if i == 0:
                mask = cv2.imread(
                    os.path.join(fragment_path, "mask.png"), cv2.IMREAD_GRAYSCALE
                )
                inklabel = cv2.imread(
                    os.path.join(fragment_path, "inklabels.png"), cv2.IMREAD_GRAYSCALE
                )
            # Append the same mask and inklabel for each image slice
            masks.append(mask)
            inklabels.append(inklabel)
            # Yield a batch of data
            if len(images) == batch_size:
                yield np.stack(images, axis=0), np.stack(masks, axis=0), np.stack(
                    inklabels, axis=0
                )
                images, masks, inklabels = [], [], []
        # Yield any remaining data
        if images:
            yield np.stack(images, axis=0), np.stack(masks, axis=0), np.stack(
                inklabels, axis=0
            )
    print("Data loading complete.")


# Usage example:
base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train"
for images, masks, inklabels in load_data(base_path, batch_size=8):
    # Process the batch of data
    pass

# # Dataset
import torch
from torch.utils.data import Dataset
import albumentations as A
from albumentations.pytorch import ToTensorV2


class VesuviusDataset(Dataset):
    def __init__(self, base_path, transforms=None, fragment_ids=None):
        self.base_path = base_path
        self.transforms = transforms
        # Default to the three training fragments; pass fragment_ids explicitly
        # for the test split, whose folder names differ.
        self.fragment_ids = fragment_ids or [str(i) for i in range(1, 4)]
        self.slice_ids = [f"{i:02d}" for i in range(65)]

    def __len__(self):
        return len(self.fragment_ids) * len(self.slice_ids)

    def __getitem__(self, idx):
        fragment_idx = idx // len(self.slice_ids)
        slice_idx = idx % len(self.slice_ids)
        fragment_id = self.fragment_ids[fragment_idx]
        slice_id = self.slice_ids[slice_idx]
        fragment_path = os.path.join(self.base_path, fragment_id)
        image = cv2.imread(
            os.path.join(fragment_path, f"surface_volume/{slice_id}.tif"),
            cv2.IMREAD_GRAYSCALE,
        )
        mask = cv2.imread(os.path.join(fragment_path, "mask.png"), cv2.IMREAD_GRAYSCALE)
        inklabel = cv2.imread(
            os.path.join(fragment_path, "inklabels.png"), cv2.IMREAD_GRAYSCALE
        )
        if inklabel is None:
            # Test fragments have no inklabels.png; use an all-zero placeholder
            # so batches still collate cleanly.
            inklabel = np.zeros_like(mask)
        if self.transforms:
            # Augment both targets together with the image so they stay aligned.
            augmented = self.transforms(image=image, masks=[mask, inklabel])
            image = augmented["image"]
            mask, inklabel = augmented["masks"]
        return image, mask, inklabel


def get_transforms():
    # Define data augmentation and preprocessing transforms
    return A.Compose(
        [
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Normalize(
                mean=[0.485], std=[0.229]
            ),  # Mean and std for grayscale images
            ToTensorV2(),
        ],
        p=1.0,
    )


# Usage example:
base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train"
transforms = (
    get_transforms()
)  # Define your data augmentation and preprocessing transforms
dataset = VesuviusDataset(base_path, transforms=transforms)
# Access data using indices
image, mask, inklabel = dataset[0]  # Get the first item from the dataset

# # Model
import torch.nn as nn
import torch.optim as optim
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
import sys

sys.path.append(
    "/kaggle/input/pretrainedmodels/pretrainedmodels-0.7.4/pretrainedmodels"
)
sys.path.append(
    "/kaggle/input/segmentation-models-pytorch/segmentation_models.pytorch-master"
)
sys.path.append(
    "/kaggle/input/efficientnet-pytorch/EfficientNet-PyTorch-master/efficientnet_pytorch"
)
import pretrainedmodels
import segmentation_models_pytorch as smp
import efficientnet_pytorch

NUM_FOLDS = 5
NUM_EPOCHS = 30
BATCH_SIZE = 8
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
BACKBONE = "efficientnet-b4"
LEARNING_RATE = 1e-4


def get_model():
    model = smp.DeepLabV3Plus(
        encoder_name=BACKBONE,
        encoder_weights="imagenet",
        in_channels=1,
        classes=1,
    )
    # Parallelize the model across multiple GPUs
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    return model.to(DEVICE)


def train_one_fold(fold, train_loader, val_loader):
    print(f"Training fold {fold}")
    model = get_model()
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    scaler = GradScaler()  # For mixed precision training
    for epoch in range(NUM_EPOCHS):
        print(f"Starting epoch {epoch}")
        # Training loop
        model.train()
        for batch in train_loader:
            images, masks, inklabels = batch
            # The ink labels are the segmentation target; scale them to [0, 1]
            # and add a channel dimension to match the model output.
            targets = (inklabels.float() / 255.0).unsqueeze(1)
            images, targets = images.to(DEVICE), targets.to(DEVICE)
            optimizer.zero_grad()
            with autocast():  # Mixed precision training
                outputs = model(images)
                loss = criterion(outputs, targets)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        # Validation loop
        model.eval()
        val_loss = 0
        with torch.no_grad():
            for batch in val_loader:
                images, masks, inklabels = batch
                targets = (inklabels.float() / 255.0).unsqueeze(1)
                images, targets = images.to(DEVICE), targets.to(DEVICE)
                outputs = model(images)
                loss = criterion(outputs, targets)
                val_loss += loss.item()
        val_loss /= len(val_loader)
        print(f"Fold {fold}, Epoch {epoch}, Validation Loss: {val_loss}")
    return model


def ensemble_models(models):
    print("Ensembling models")

    def ensemble(images):
        with torch.no_grad():
            preds = [model(images) for model in models]
            avg_preds = sum(preds) / len(preds)
        return avg_preds

    return ensemble


# # Main
def kfold_cross_validation():
    # The dataset loads slices from disk on demand, so there is no need to
    # accumulate all volumes in memory first.
    # Note: fragments differ in size, so in practice slices are cropped or
    # tiled to a fixed shape before batching across fragments.
    train_base_path = "/kaggle/input/vesuvius-challenge-ink-detection/train"
    dataset = VesuviusDataset(train_base_path, transforms=get_transforms())
    kfold = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42)
    models = []
    for fold, (train_indices, val_indices) in enumerate(kfold.split(dataset)):
        train_subset = torch.utils.data.Subset(dataset, train_indices)
        val_subset = torch.utils.data.Subset(dataset, val_indices)
        train_loader = DataLoader(
            train_subset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4
        )
        val_loader = DataLoader(
            val_subset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4
        )
        model = train_one_fold(fold, train_loader, val_loader)
        models.append(model)
    return models


# K-Fold Cross Validation and training
models = kfold_cross_validation()
# Ensembling models
ensemble_fn = ensemble_models(models)
# Load test data; the test folder uses its own fragment names, so list them
# instead of assuming fragments 1-3.
test_base_path = "/kaggle/input/vesuvius-challenge-ink-detection/test"
test_fragment_ids = sorted(os.listdir(test_base_path))
test_dataset = VesuviusDataset(
    test_base_path, transforms=get_transforms(), fragment_ids=test_fragment_ids
)
test_data_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# # Submission
import pandas as pd


def rle_encoding(pred):
    # Minimal run-length encoding of a binary array as alternating
    # "start length" pairs (1-indexed); verify against the competition's
    # official submission format.
    pixels = np.asarray(pred).flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return runs


def predict(ensemble_fn, test_data_loader):
    predictions = []
    with torch.no_grad():
        for batch in test_data_loader:
            images, masks, _ = batch
            images = images.to(DEVICE)
            outputs = ensemble_fn(images)
            # The model returns logits; convert to binary ink predictions.
            outputs = (torch.sigmoid(outputs) > 0.5).float()
            predictions.extend(outputs.cpu().numpy())
    return predictions


def create_submission_file(predictions):
    # Build the rows first; DataFrame.append was removed in recent pandas.
    rows = []
    for i, pred in enumerate(predictions):
        rle = rle_encoding(pred.squeeze())
        rows.append({"Id": str(i), "Predicted": " ".join(map(str, rle))})
    submission = pd.DataFrame(rows, columns=["Id", "Predicted"])
    submission.to_csv("submission.csv", index=False)


# Predict and create submission file
predictions = predict(ensemble_fn, test_data_loader)
create_submission_file(predictions)
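# The fragment mask marks which pixels actually contain papyrus, yet the
# training loop above computes the loss over every pixel. Below is a minimal
# sketch of a mask-aware loss; `masked_bce_loss` is a hypothetical helper,
# not part of the original pipeline, and it assumes logits and targets shaped
# (N, 1, H, W) with masks as (N, H, W) uint8 images.
import torch
import torch.nn.functional as F


def masked_bce_loss(logits, targets, masks):
    # Per-pixel BCE, averaged only over pixels inside the papyrus mask.
    per_pixel = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    masks = (masks.float() / 255.0).unsqueeze(1)  # (N, H, W) -> (N, 1, H, W)
    return (per_pixel * masks).sum() / masks.sum().clamp(min=1.0)


# Usage inside the training loop would replace the criterion call, e.g.:
# loss = masked_bce_loss(outputs, targets, masks)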
false
0
3,224
5
3,224
3,224
129085125
<jupyter_start><jupyter_text>Heart Failure Prediction
# About this dataset
> Cardiovascular diseases (CVDs) are the **number 1 cause of death globally**, taking an estimated **17.9 million lives each year**, which accounts for **31% of all deaths worldwide**. Heart failure is a common event caused by CVDs and this dataset contains 12 features that can be used to predict mortality by heart failure.
> Most cardiovascular diseases can be prevented by addressing behavioural risk factors such as tobacco use, unhealthy diet and obesity, physical inactivity and harmful use of alcohol using population-wide strategies.
> People with cardiovascular disease or who are at high cardiovascular risk (due to the presence of one or more risk factors such as hypertension, diabetes, hyperlipidaemia or already established disease) need **early detection** and management wherein a machine learning model can be of great help.
# How to use this dataset
> - Create a model for predicting mortality caused by Heart Failure.
- Your kernel can be featured here!
- [More datasets](https://www.kaggle.com/andrewmvd/datasets)
# Acknowledgements
If you use this dataset in your research, please credit the authors
> ### Citation
Davide Chicco, Giuseppe Jurman: Machine learning can predict survival of patients with heart failure from serum creatinine and ejection fraction alone. BMC Medical Informatics and Decision Making 20, 16 (2020). ([link](https://doi.org/10.1186/s12911-020-1023-5))
> ### License
CC BY 4.0
> ### Splash icon
Icon by [Freepik](https://www.flaticon.com/authors/freepik), available on [Flaticon](https://www.flaticon.com/free-icon/heart_1186541).
> ### Splash banner
Wallpaper by [jcomp](https://br.freepik.com/jcomp), available on [Freepik](https://br.freepik.com/fotos-gratis/simplesmente-design-minimalista-com-estetoscopio-de-equipamento-de-medicina-ou-phonendoscope_5018002.htm#page=1&query=cardiology&position=3).
Kaggle dataset identifier: heart-failure-clinical-data
<jupyter_script># # Heart Failure Study
# https://www.kaggle.com/datasets/andrewmvd/heart-failure-clinical-data
import pandas as pd
import seaborn as sns
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report

DATA_DIRECTORY = "/kaggle/input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv"
df = pd.read_csv(DATA_DIRECTORY)
df.head(10)
df.isnull().sum()
# No nulls found
len(df)
sns.countplot(x="DEATH_EVENT", data=df)
# Dataset is imbalanced. Will need to account for this during training
X_train, X_test, y_train, y_test = train_test_split(
    df.drop(["DEATH_EVENT"], axis=1),
    df["DEATH_EVENT"],
    test_size=0.25,
    shuffle=True,
    random_state=42,
)
X_train
standard_scaler = StandardScaler()
X_train = standard_scaler.fit_transform(X_train)
# Only transform the test set; refitting the scaler on it would leak test statistics.
X_test = standard_scaler.transform(X_test)
X_train
# I'll try four different algorithms:
# 1. Logistic Regression
# 2. SVC
# 3. Random Forest
# 4. Naive Bayes
model_1 = LogisticRegression(random_state=42, class_weight="balanced")
model_1.fit(X_train, y_train)
y_preds = model_1.predict(X_test)
sns.heatmap(confusion_matrix(y_test, y_preds), annot=True, fmt="g")
print(classification_report(y_test, y_preds))
grid_search = GridSearchCV(
    SVC(kernel="linear", random_state=42, class_weight="balanced"),
    # gamma has no effect with a linear kernel, but is kept in the grid.
    {"C": [0.01, 0.1, 1, 10], "gamma": [0.01, 0.1, 1, "scale", "auto"]},
    scoring="accuracy",
    cv=5,
    verbose=True,
)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
model_2 = SVC(
    kernel="linear", C=0.01, gamma=0.01, random_state=42, class_weight="balanced"
)
model_2.fit(X_train, y_train)
y_preds = model_2.predict(X_test)
sns.heatmap(confusion_matrix(y_test, y_preds), annot=True, fmt="g")
print(classification_report(y_test, y_preds))
grid_search = GridSearchCV(
    RandomForestClassifier(random_state=42, class_weight="balanced"),
    {"n_estimators": [50, 100, 200], "max_depth": [None, 5, 10, 15, 20]},
    scoring="accuracy",
    cv=5,
)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
model_3 = RandomForestClassifier(
    n_estimators=100, max_depth=5, random_state=42, class_weight="balanced"
)
model_3.fit(X_train, y_train)
y_preds = model_3.predict(X_test)
sns.heatmap(confusion_matrix(y_test, y_preds), annot=True, fmt="g")
print(classification_report(y_test, y_preds))
model_4 = BernoulliNB()
model_4.fit(X_train, y_train)
y_preds = model_4.predict(X_test)
sns.heatmap(confusion_matrix(y_test, y_preds), annot=True, fmt="g")
print(classification_report(y_test, y_preds))
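# The grid searches above tune hyperparameters on data that was scaled with
# statistics from the whole training split, so each CV fold sees a little
# information from its own validation part. A minimal, leakage-free sketch
# using a Pipeline is shown below; the `pipe`/`pipe_search` names and the raw
# re-split are illustrative, not part of the study above, and it assumes the
# `df` loaded earlier.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report

X_raw_train, X_raw_test, y_raw_train, y_raw_test = train_test_split(
    df.drop(["DEATH_EVENT"], axis=1),
    df["DEATH_EVENT"],
    test_size=0.25,
    shuffle=True,
    random_state=42,
)
pipe = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("svc", SVC(kernel="linear", random_state=42, class_weight="balanced")),
    ]
)
# Inside a Pipeline, parameters are addressed as <step>__<parameter>;
# the scaler is re-fit on the training part of every CV fold.
pipe_search = GridSearchCV(
    pipe, {"svc__C": [0.01, 0.1, 1, 10]}, scoring="accuracy", cv=5
)
pipe_search.fit(X_raw_train, y_raw_train)
print(pipe_search.best_params_)
print(classification_report(y_raw_test, pipe_search.predict(X_raw_test)))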
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/085/129085125.ipynb
heart-failure-clinical-data
andrewmvd
[{"Id": 129085125, "ScriptId": 38373895, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6549302, "CreationDate": "05/10/2023 22:14:49", "VersionNumber": 1.0, "Title": "Heart Failure Study Model Selection", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 146.0, "LinesInsertedFromPrevious": 146.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184826089, "KernelVersionId": 129085125, "SourceDatasetVersionId": 1263738}]
[{"Id": 1263738, "DatasetId": 727551, "DatasourceVersionId": 1295676, "CreatorUserId": 793761, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "06/20/2020 01:03:20", "VersionNumber": 1.0, "Title": "Heart Failure Prediction", "Slug": "heart-failure-clinical-data", "Subtitle": "12 clinical features por predicting death events.", "Description": "# About this dataset\n&gt; Cardiovascular diseases (CVDs) are the **number 1 cause of death globally**, taking an estimated **17.9 million lives each year**, which accounts for **31% of all deaths worlwide**.\nHeart failure is a common event caused by CVDs and this dataset contains 12 features that can be used to predict mortality by heart failure.\n\n&gt; Most cardiovascular diseases can be prevented by addressing behavioural risk factors such as tobacco use, unhealthy diet and obesity, physical inactivity and harmful use of alcohol using population-wide strategies.\n\n&gt; People with cardiovascular disease or who are at high cardiovascular risk (due to the presence of one or more risk factors such as hypertension, diabetes, hyperlipidaemia or already established disease) need **early detection** and management wherein a machine learning model can be of great help.\n\n# How to use this dataset\n&gt; - Create a model for predicting mortality caused by Heart Failure.\n- Your kernel can be featured here!\n- [More datasets](https://www.kaggle.com/andrewmvd/datasets)\n\n\n\n# Acknowledgements\nIf you use this dataset in your research, please credit the authors\n&gt; ### Citation\nDavide Chicco, Giuseppe Jurman: Machine learning can predict survival of patients with heart failure from serum creatinine and ejection fraction alone. BMC Medical Informatics and Decision Making 20, 16 (2020). ([link](https://doi.org/10.1186/s12911-020-1023-5))\n\n&gt; ### License\nCC BY 4.0\n\n&gt; ### Splash icon\nIcon by [Freepik](https://www.flaticon.com/authors/freepik), available on [Flaticon](https://www.flaticon.com/free-icon/heart_1186541).\n\n&gt; ### Splash banner\nWallpaper by [jcomp](https://br.freepik.com/jcomp), available on [Freepik](https://br.freepik.com/fotos-gratis/simplesmente-design-minimalista-com-estetoscopio-de-equipamento-de-medicina-ou-phonendoscope_5018002.htm#page=1&query=cardiology&position=3).", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 727551, "CreatorUserId": 793761, "OwnerUserId": 793761.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1263738.0, "CurrentDatasourceVersionId": 1295676.0, "ForumId": 742394, "Type": 2, "CreationDate": "06/20/2020 01:03:20", "LastActivityDate": "06/20/2020", "TotalViews": 882099, "TotalDownloads": 116977, "TotalVotes": 2090, "TotalKernels": 920}]
[{"Id": 793761, "UserName": "andrewmvd", "DisplayName": "Larxel", "RegisterDate": "11/15/2016", "PerformanceTier": 4}]
false
0
1,014
0
1,623
1,014
129111338
<jupyter_start><jupyter_text>New Zealand Firearm Licenses April 2023 This small dataset has the total count of active firearm licenses in New Zealand split up by the 12 police districts. Data was obtained from [New Zealand Police's public online database](https://www.police.govt.nz/about-us/publication/firearms-data), which uploads a new Excel sheet of firearm data monthly. Preview image from Tom Def (thdef) on Unsplash. Kaggle dataset identifier: new-zealand-firearm-licenses-april-2023 <jupyter_script># # Making Maps with New Zealand Firearm License Data # This combines the [New Zealand police district boundaries map](https://www.kaggle.com/datasets/protobioengineering/new-zealand-police-district-boundaries-2021) with data on [the total number of firearm licenses in each police district](https://www.kaggle.com/datasets/protobioengineering/new-zealand-firearm-licenses-april-2023). **The final map is at the bottom of this notebook.** # **Note:** The code uses the spelling `Licences`, since that is the proper spelling in New Zealand. # ### Imports and Files import geopandas as gpd import pandas as pd import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # ## Reading the Data new_zealand_map = gpd.read_file( "/kaggle/input/new-zealand-police-district-boundaries-2021/nz-police-district-boundaries.shx" ) firearm_licenses = pd.read_csv( "/kaggle/input/new-zealand-firearm-licenses-april-2023/nz_firearm_licenses_20230401.csv" ) firearm_licenses["Licences"] = firearm_licenses["Licences"].astype("int") new_zealand_map firearm_licenses # ## Merging the Map and Firearm License Data merged_map = new_zealand_map.merge( firearm_licenses, left_on=["DISTRICT_N"], right_on=["Residence District"] ) merged_map.plot(column="Licences", cmap="Blues", legend=True) # ## Improving the Firearm License Map merged_map.plot(column="Licences", cmap="Blues", legend=True) plt.title( "Active Firearm Licenses by Police District in New Zealand (April 2023)", y=1.04 ) plt.tick_params( axis="both", # do the following to both the X and Y axes which="both", # get rid of both major and minor ticks top=False, # get rid of ticks on top/bottom/left/right bottom=False, left=False, right=False, labeltop=False, # get rid of labels on top/bottom/left/right labelbottom=False, labelleft=False, labelright=False, ) plt.axis("off") plt.subplots_adjust(right=0.85)
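# A possible refinement (a sketch, not part of the original notebook): label
# each district with its license count so the choropleth can be read without
# the colorbar. It assumes the `merged_map` GeoDataFrame built above with its
# `Licences` column.
import matplotlib.pyplot as plt

ax = merged_map.plot(column="Licences", cmap="Blues", legend=True, figsize=(8, 10))
for _, row in merged_map.iterrows():
    # representative_point() is guaranteed to fall inside the polygon,
    # unlike the centroid of an irregular district.
    point = row["geometry"].representative_point()
    ax.annotate(f"{row['Licences']:,}", xy=(point.x, point.y), ha="center", fontsize=7)
ax.set_axis_off()
plt.title("Active Firearm Licenses by Police District (April 2023)")
plt.show()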
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/111/129111338.ipynb
new-zealand-firearm-licenses-april-2023
protobioengineering
[{"Id": 129111338, "ScriptId": 38381117, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14517655, "CreationDate": "05/11/2023 05:09:00", "VersionNumber": 1.0, "Title": "NZ Police - Visualizing Firearm Ownership", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 184877868, "KernelVersionId": 129111338, "SourceDatasetVersionId": 5660355}, {"Id": 184877867, "KernelVersionId": 129111338, "SourceDatasetVersionId": 5659139}]
[{"Id": 5660355, "DatasetId": 3253171, "DatasourceVersionId": 5735783, "CreatorUserId": 14517655, "LicenseName": "Unknown", "CreationDate": "05/11/2023 04:58:44", "VersionNumber": 2.0, "Title": "New Zealand Firearm Licenses April 2023", "Slug": "new-zealand-firearm-licenses-april-2023", "Subtitle": "Active firearm licenses in New Zealand by police district in April 2023", "Description": "This small dataset has the total count of active firearm licenses in New Zealand split up by the 12 police districts.\n\nData was obtained from [New Zealand Police's public online database](https://www.police.govt.nz/about-us/publication/firearms-data), which uploads a new Excel sheet of firearm data monthly.\n\nPreview image from Tom Def (thdef) on Unsplash.", "VersionNotes": "Data Update 2023-05-11", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3253171, "CreatorUserId": 14517655, "OwnerUserId": 14517655.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5660355.0, "CurrentDatasourceVersionId": 5735783.0, "ForumId": 3318614, "Type": 2, "CreationDate": "05/11/2023 04:22:27", "LastActivityDate": "05/11/2023", "TotalViews": 53, "TotalDownloads": 9, "TotalVotes": 1, "TotalKernels": 1}]
[{"Id": 14517655, "UserName": "protobioengineering", "DisplayName": "Proto Bioengineering", "RegisterDate": "04/06/2023", "PerformanceTier": 0}]
false
1
655
1
798
655
129111690
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import the packages/modules that will be used
import matplotlib.pyplot as plt
import seaborn as sns
import io
import plotly.express as px
import warnings

warnings.filterwarnings("ignore")
building = pd.read_csv("/kaggle/input/ashrae-energy-prediction/building_metadata.csv")
print(building.info())
energi = pd.read_csv("/kaggle/input/ashrae-energy-prediction/train.csv")
print(energi.info())
train = pd.read_csv("/kaggle/input/ashrae-energy-prediction/train.csv")
print(train.info())
# Merge the meter readings with the building metadata on their shared key
file_sumber = pd.merge(building, energi, on="building_id")
file_sumber
building2 = building[building["primary_use"] == "Office"]
building3 = building2[
    ["site_id", "building_id", "primary_use", "square_feet", "year_built"]
]
building3
energi2 = energi[energi["meter"] == 0]
energi3 = energi2[energi2["meter_reading"] > 0]
energi3
print(file_sumber.info())
file_sumber["timestamp"] = pd.to_datetime(
    file_sumber["timestamp"], format="%Y-%m-%d %H:%M:%S"
)
newdata = file_sumber.set_index("timestamp")
newdata.head()
file_sumber.info()
file_sumber.describe()
file_sumber.duplicated().sum()  # count the number of duplicate rows
sns.set(rc={"figure.figsize": (30, 15)})
sns.boxplot(newdata)
plt.semilogy()
newdata.isna().sum()
# Percentage of missing values
(newdata.isnull().sum() / len(newdata) * 100).to_frame("missing percentage")
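# With the timestamp index in place, `newdata` supports time-based
# aggregation. A small sketch follows; building 0 and meter 0 (electricity)
# are arbitrary example choices, not analysis results from above.
import matplotlib.pyplot as plt

one_building = newdata[(newdata["building_id"] == 0) & (newdata["meter"] == 0)]
daily = one_building["meter_reading"].resample("D").mean()
daily.plot(figsize=(15, 4), title="Daily mean meter_reading, building 0 (electricity)")
plt.ylabel("meter_reading")
plt.show()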
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/111/129111690.ipynb
null
null
[{"Id": 129111690, "ScriptId": 38381572, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15048621, "CreationDate": "05/11/2023 05:13:42", "VersionNumber": 1.0, "Title": "notebookd111ec2f4e", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
667
0
667
667
129111023
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm.notebook import tqdm
from colorama import Style, Fore

blk = Style.BRIGHT + Fore.BLACK
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
res = Style.RESET_ALL
base_dir = "/kaggle/input/asl-fingerspelling"
train_csv = f"{base_dir}/train.csv"
supplemental_csv = f"{base_dir}/supplemental_metadata.csv"

# # Train Dataset
train = pd.read_csv(train_csv)
train.head()

# ## Top 30 Phrase in Training Dataset
fig, ax = plt.subplots(figsize=(8, 8))
train["phrase"].value_counts().head(30).sort_values(ascending=True).plot(
    kind="barh", ax=ax, title="Top 30 Phrase in Training Dataset"
)
ax.set_xlabel("Number of Training Examples")
plt.show()

# ## Parquet File of Top 1 Phrase
train_example = train.query('phrase == "surprise az"')["path"].values[0]
example_landmark = pd.read_parquet(f"{base_dir}/{train_example}")
example_landmark
sequence_target = train[
    (train["path"] == train_example) & (train["phrase"] == "surprise az")
]["sequence_id"].values[0]
# One parquet file holds a whole set of sequence_ids. Here I want to show the landmarks for `surprise az`, but a single parquet file can contain many sequence_ids, which means one parquet file can hold several different labels.
filtered_data = example_landmark[example_landmark.index == sequence_target]
filtered_data.head()


def x_y_z(column_names):
    x = sum(1 for col in column_names if col.startswith("x"))
    y = sum(1 for col in column_names if col.startswith("y"))
    z = sum(1 for col in column_names if col.startswith("z"))
    return x, y, z


def type_of_landmark(example_landmark):
    body_parts = set()
    for column in example_landmark.columns:
        parts = column.split("_")
        if len(parts) >= 2:
            if parts[1] == "right":
                body_parts.add("right_hand")
            elif parts[1] == "left":
                body_parts.add("left_hand")
            else:
                body_parts.add(parts[1])
    return body_parts


# ### Check Landmark, Frames, and (x, y, z) points
unique_frames = filtered_data["frame"].nunique()
type_landmark = type_of_landmark(filtered_data)
face_columns = [col for col in filtered_data.columns if "face" in col]
right_hand_columns = [col for col in filtered_data.columns if "right_hand" in col]
left_hand_columns = [col for col in filtered_data.columns if "left_hand" in col]
pose_columns = [col for col in filtered_data.columns if "pose" in col]
x_face, y_face, z_face = x_y_z(face_columns)
x_right_hand, y_right_hand, z_right_hand = x_y_z(right_hand_columns)
x_left_hand, y_left_hand, z_left_hand = x_y_z(left_hand_columns)
x_pose, y_pose, z_pose = x_y_z(pose_columns)
print(
    f"{blk}Landmark file for sequence_id {red}{sequence_target}{blk} has {red}{unique_frames}{blk} frames "
)
print(
    f"{blk}This landmark has {red}{len(type_landmark)} {blk}types of landmarks and consists of {red}{type_landmark}"
)
print(
    f"\n{blk}Face landmark has {red}{len(face_columns)} {blk}points in x : {red}{x_face} points, {blk}y : {red}{y_face} points, {blk}z : {red}{z_face} points"
)
print(
    f"{blk}Right hand landmark has {red}{len(right_hand_columns)} {blk}points in x : {red}{x_right_hand} points, {blk}y : {red}{y_right_hand} points, {blk}z : {red}{z_right_hand} points"
)
print(
    f"{blk}Left hand landmark has {red}{len(left_hand_columns)} {blk}points in x : {red}{x_left_hand} points, {blk}y : {red}{y_left_hand} points, {blk}z : {red}{z_left_hand} points"
)
print(
    f"{blk}Pose landmark has {red}{len(pose_columns)} {blk}points in x : {red}{x_pose} points, {blk}y : {red}{y_pose} points, {blk}z : {red}{z_pose} points"
)

# # Supplemental Dataset
supplemental = pd.read_csv(supplemental_csv)
supplemental.head()

# ## Top 30 Phrase in Supplemental Dataset
fig, ax = plt.subplots(figsize=(8, 8))
supplemental["phrase"].value_counts().head(30).sort_values(ascending=True).plot(
    kind="barh", ax=ax, title="Top 30 Phrase in Supplemental"
)
ax.set_xlabel("Number of Examples")
plt.show()

# ## Parquet File of Top 1 Phrase
sup_example = supplemental.query('phrase == "why do you ask silly questions"')[
    "path"
].values[0]
ex_landmark = pd.read_parquet(f"{base_dir}/{sup_example}")
ex_landmark
sequence_target = supplemental[
    (supplemental["path"] == sup_example)
    & (supplemental["phrase"] == "why do you ask silly questions")
]["sequence_id"].values[0]
filtered_data = ex_landmark[ex_landmark.index == sequence_target]
filtered_data.head()

# ### Check Landmark, Frames, and (x, y, z) points
unique_frames = filtered_data["frame"].nunique()
type_landmark = type_of_landmark(filtered_data)
face_columns = [col for col in filtered_data.columns if "face" in col]
right_hand_columns = [col for col in filtered_data.columns if "right_hand" in col]
left_hand_columns = [col for col in filtered_data.columns if "left_hand" in col]
pose_columns = [col for col in filtered_data.columns if "pose" in col]
x_face, y_face, z_face = x_y_z(face_columns)
x_right_hand, y_right_hand, z_right_hand = x_y_z(right_hand_columns)
x_left_hand, y_left_hand, z_left_hand = x_y_z(left_hand_columns)
x_pose, y_pose, z_pose = x_y_z(pose_columns)
print(
    f"{blk}Landmark file for sequence_id {red}{sequence_target}{blk} has {red}{unique_frames}{blk} frames "
)
print(
    f"{blk}This landmark has {red}{len(type_landmark)} {blk}types of landmarks and consists of {red}{type_landmark}"
)
print(
    f"\n{blk}Face landmark has {red}{len(face_columns)} {blk}points in x : {red}{x_face} points, {blk}y : {red}{y_face} points, {blk}z : {red}{z_face} points"
)
print(
    f"{blk}Right hand landmark has {red}{len(right_hand_columns)} {blk}points in x : {red}{x_right_hand} points, {blk}y : {red}{y_right_hand} points, {blk}z : {red}{z_right_hand} points"
)
print(
    f"{blk}Left hand landmark has {red}{len(left_hand_columns)} {blk}points in x : {red}{x_left_hand} points, {blk}y : {red}{y_left_hand} points, {blk}z : {red}{z_left_hand} points"
)
print(
    f"{blk}Pose landmark has {red}{len(pose_columns)} {blk}points in x : {red}{x_pose} points, {blk}y : {red}{y_pose} points, {blk}z : {red}{z_pose} points"
)
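# To sanity-check a sequence visually, the x/y coordinates of one landmark
# group can be scattered for a single frame. This is a sketch, assuming the
# x_right_hand_<i>/y_right_hand_<i> column naming used above and the
# `filtered_data` frame selected above.
import matplotlib.pyplot as plt

first_frame = filtered_data[filtered_data["frame"] == filtered_data["frame"].min()]
x_cols = sorted(c for c in filtered_data.columns if c.startswith("x_right_hand"))
y_cols = sorted(c for c in filtered_data.columns if c.startswith("y_right_hand"))
plt.figure(figsize=(4, 4))
# Flip y so the plot matches image coordinates (origin at the top-left).
plt.scatter(first_frame[x_cols].values.ravel(), -first_frame[y_cols].values.ravel())
plt.title("Right-hand landmarks, first frame")
plt.show()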
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/111/129111023.ipynb
null
null
[{"Id": 129111023, "ScriptId": 38377708, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11120740, "CreationDate": "05/11/2023 05:05:08", "VersionNumber": 3.0, "Title": "\ud83e\udd1e\ud83c\udffc [Simple EDA] ASLFR \ud83e\udd1e\ud83c\udffc", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 131.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,169
0
2,169
2,169
129995546
# package imports import numpy as np import pandas as pd # Important imports for modeling and evaluation from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier from sklearn.tree import plot_tree import sklearn.metrics as metrics # Visualization package imports import matplotlib.pyplot as plt import seaborn as sns # importing data. df_original = pd.read_csv("/kaggle/input/invistico-airline/Invistico_Airline.csv") df_original.head(n=10) df_original.dtypes df_original["Class"].unique() df_original["satisfaction"].value_counts(dropna=False) df_original.isnull().sum() df_original.shape df_subset = df_original.dropna(axis=0).reset_index(drop=True) df_subset.isna().sum() df_subset.shape df_subset["Class"] = df_subset["Class"].map({"Business": 3, "Eco Plus": 2, "Eco": 1}) df_subset["satisfaction"] = df_subset["satisfaction"].map( {"satisfied": 1, "dissatisfied": 0} ) df_subset = pd.get_dummies(df_subset, drop_first=True) df_subset.dtypes y = df_subset["satisfaction"] X = df_subset.copy() X = X.drop("satisfaction", axis=1) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=0 ) decision_tree = DecisionTreeClassifier(random_state=0) decision_tree.fit(X_train, y_train) dt_pred = decision_tree.predict(X_test) print("Decision Tree") print("Accuracy:", "%.6f" % metrics.accuracy_score(y_test, dt_pred)) print("Precision:", "%.6f" % metrics.precision_score(y_test, dt_pred)) print("Recall:", "%.6f" % metrics.recall_score(y_test, dt_pred)) print("F1 Score:", "%.6f" % metrics.f1_score(y_test, dt_pred)) cm = metrics.confusion_matrix(y_test, dt_pred, labels=decision_tree.classes_) disp = metrics.ConfusionMatrixDisplay( confusion_matrix=cm, display_labels=decision_tree.classes_ ) disp.plot() plt.figure(figsize=(20, 12)) plot_tree(decision_tree, max_depth=2, fontsize=14, feature_names=X.columns) importances = decision_tree.feature_importances_ forest_importances = pd.Series(importances, index=X.columns) fig, ax = plt.subplots() forest_importances.plot.bar(ax=ax) tree_para = { "max_depth": [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 30, 40, 50, ], "min_samples_leaf": [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 50], } scoring = {"accuracy", "precision", "recall", "f1"} tuned_decision_tree = DecisionTreeClassifier(random_state=0) clf = GridSearchCV(tuned_decision_tree, tree_para, scoring=scoring, cv=5, refit="f1") clf.fit(X_train, y_train) clf.best_estimator_ print("Best Avg. Validation Score: ", "%.4f" % clf.best_score_) results = pd.DataFrame(columns=["Model", "F1", "Recall", "Precision", "Accuracy"]) def make_results(model_name, model_object): """ Accepts as arguments a model name (your choice - string) and a fit GridSearchCV model object. Returns a pandas df with the F1, recall, precision, and accuracy scores for the model with the best mean F1 score across all validation folds. """ # Get all the results from the CV and put them in a df. cv_results = pd.DataFrame(model_object.cv_results_) # Isolate the row of the df with the max(mean f1 score). best_estimator_results = cv_results.iloc[cv_results["mean_test_f1"].idxmax(), :] # Extract accuracy, precision, recall, and f1 score from that row. f1 = best_estimator_results.mean_test_f1 recall = best_estimator_results.mean_test_recall precision = best_estimator_results.mean_test_precision accuracy = best_estimator_results.mean_test_accuracy # Create a table of results. 
    # DataFrame.append was removed in recent pandas; build the row directly.
    table = pd.DataFrame(
        [
            {
                "Model": model_name,
                "F1": f1,
                "Recall": recall,
                "Precision": precision,
                "Accuracy": accuracy,
            }
        ]
    )
    return table


result_table = make_results("Tuned Decision Tree", clf)
result_table
plt.figure(figsize=(20, 12))
plot_tree(clf.best_estimator_, max_depth=2, fontsize=14, feature_names=X.columns)
importances = clf.best_estimator_.feature_importances_
forest_importances = pd.Series(importances, index=X.columns)
fig, ax = plt.subplots()
forest_importances.plot.bar(ax=ax)
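# The bar chart above plots every feature in dataset order, which is hard to
# read. A sketch that sorts the tuned tree's importances and keeps the top 10
# (the cutoff of 10 is an arbitrary choice); it assumes the `clf` and `X`
# objects defined above.
import pandas as pd
import matplotlib.pyplot as plt

top_importances = (
    pd.Series(clf.best_estimator_.feature_importances_, index=X.columns)
    .sort_values(ascending=False)
    .head(10)
)
fig, ax = plt.subplots(figsize=(8, 4))
top_importances.plot.bar(ax=ax)
ax.set_ylabel("Gini importance")
ax.set_title("Top 10 features, tuned decision tree")
plt.tight_layout()
plt.show()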
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/995/129995546.ipynb
null
null
[{"Id": 129995546, "ScriptId": 38670581, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/18/2023 02:36:49", "VersionNumber": 3.0, "Title": "Building a Decision Tree with Python", "EvaluationDate": "05/18/2023", "IsChange": false, "TotalLines": 146.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,424
0
1,424
1,424
129801519
<jupyter_start><jupyter_text>Chest CT-Scan images Dataset
### Data Story
It was a project about chest cancer detection using machine learning and deep learning (CNN). We classify and diagnose whether the patient has cancer or not using an AI model, and give them information about the type of cancer and the way of treatment. We tried to collect all the data we needed so the model could classify the images easily, which meant fetching data from many resources to start the project. I researched a lot to collect all the data from many resources and cleaned it for the CNN.
### Data
Images are not in dcm format; the images are in jpg or png to fit the model.
The data contains 3 chest cancer types, which are Adenocarcinoma, Large cell carcinoma, and Squamous cell carcinoma, plus 1 folder for normal cells.
The Data folder is the main folder that contains all the step folders; inside the Data folder are test, train, and valid.
**test** represents the testing set
**train** represents the training set
**valid** represents the validation set
The training set is **70%**, the testing set is **20%**, and the validation set is **10%**.
1. **Adenocarcinoma**
Adenocarcinoma of the lung: Lung adenocarcinoma is the most common form of lung cancer, accounting for 30 percent of all cases overall and about 40 percent of all non-small cell lung cancer occurrences. Adenocarcinomas are found in several common cancers, including breast, prostate and colorectal. Adenocarcinomas of the lung are found in the outer region of the lung, in glands that secrete mucus and help us breathe. Symptoms include coughing, hoarseness, weight loss and weakness.
2. **Large cell carcinoma**
Large-cell undifferentiated carcinoma: Large-cell undifferentiated carcinoma lung cancer grows and spreads quickly and can be found anywhere in the lung. This type of lung cancer usually accounts for 10 to 15 percent of all cases of NSCLC. Large-cell undifferentiated carcinoma tends to grow and spread quickly.
3. **Squamous cell carcinoma**
Squamous cell: This type of lung cancer is found centrally in the lung, where the larger bronchi join the trachea to the lung, or in one of the main airway branches. Squamous cell lung cancer is responsible for about 30 percent of all non-small cell lung cancers, and is generally linked to smoking.
And the last folder contains the normal CT-Scan images.
Kaggle dataset identifier: chest-ctscan-images
<jupyter_script>import numpy as np
from numpy.random import seed
import pandas as pd
import os
import matplotlib.pyplot as plt
import warnings

warnings.simplefilter(action="ignore", category=FutureWarning)
import glob
import seaborn as sns

# Tensorflow Packages
import tensorflow as tf
from tensorflow import keras
from tensorflow.random import set_seed
from keras.models import Sequential
from keras.layers import (
    Dense,
    Dropout,
    Flatten,
    BatchNormalization,
    GlobalAveragePooling2D,
)
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet import preprocess_input

print("TensorFlow version:", tf.__version__)
print("Keras version:", keras.__version__)
import logging

logging.getLogger("tensorflow").setLevel(logging.ERROR)
seed_val = 42  # Seed for reproducible results
seed(seed_val)  # numpy's seed
set_seed(seed_val)  # tensorflow's seed
AUTOTUNE = tf.data.experimental.AUTOTUNE
gpus = tf.config.experimental.list_physical_devices("GPU")
print("Numbers of GPUs:", len(gpus))
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], enable=True)
    details = tf.config.experimental.get_device_details(gpus[0])
    print("GPU Details:", details.get("device_name", "Unknown GPU"))
data_dir = "/kaggle/input/chest-ctscan-images/Data/train"  # Train directory
class_names = [class_name for class_name in os.listdir(data_dir)]
print("Number of unique classes:", len(class_names))
total_images = glob.glob(data_dir + "/*/*")
print(f"There are {len(total_images)} images for training in the dataset.")
os.chdir("/kaggle/input/chest-ctscan-images/Data")
batch_size = 32
img_size = (512, 512)
train_ds = tf.keras.utils.image_dataset_from_directory(
    "train",
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=True,
)
val_ds = tf.keras.utils.image_dataset_from_directory(
    "test",  # We changed the directory because `test` has more images
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=False,
)
test_ds = tf.keras.utils.image_dataset_from_directory(
    "valid",
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=False,
)
class_names = train_ds.class_names
print(class_names)
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break


def min_max(ds):
    img_batch, labels_batch = next(iter(ds))
    print(
        "Minimum pixel value:",
        np.min(img_batch[0]),
        "\nMaximum pixel value:",
        np.max(img_batch[0]),
    )


min_max(train_ds)


# The RGB channel values are in the [0, 255] range. This is not ideal for a neural network; in general we should seek to make our input values small. Here we apply `tf.keras.applications.resnet.preprocess_input` inside the model, which expects float tensors of pixels in the [0, 255] range and performs the channel normalization itself, so there is no need for a separate `tf.keras.layers.Rescaling` step.
def configure_for_performance(ds):
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds


def plot_training(hist):
    """
    This function takes a Keras training History object and plots the
    accuracy and loss curves, marking the best epoch on each plot.
    """
    # Define needed variables
    tr_acc = hist.history["accuracy"]
    tr_loss = hist.history["loss"]
    val_acc = hist.history["val_accuracy"]
    val_loss = hist.history["val_loss"]
    index_loss = np.argmin(val_loss)
    val_lowest = val_loss[index_loss]
    index_acc = np.argmax(val_acc)
    acc_highest = val_acc[index_acc]
    Epochs = [i + 1 for i in range(len(tr_acc))]
    loss_label = f"best epoch= {str(index_loss + 1)}"
    acc_label = f"best epoch= {str(index_acc + 1)}"
    # Plot training history
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 2, 1)
    plt.plot(Epochs, tr_loss, "r", label="Training loss")
    plt.plot(Epochs, val_loss, "g", label="Validation loss")
    plt.scatter(index_loss + 1, val_lowest, s=150, c="blue", label=loss_label)
    plt.title("Training and Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(Epochs, tr_acc, "r", label="Training Accuracy")
    plt.plot(Epochs, val_acc, "g", label="Validation Accuracy")
    plt.scatter(index_acc + 1, acc_highest, s=150, c="blue", label=acc_label)
    plt.title("Training and Validation Accuracy")
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.tight_layout()
    sns.despine()


data_augmentation = tf.keras.Sequential(
    [
        keras.layers.RandomFlip("horizontal_and_vertical"),
        keras.layers.RandomRotation(0.2),
    ]
)
for image, _ in train_ds.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        plt.imshow(augmented_image[0] / 255.0)
        plt.axis("off")
# train_ds = configure_for_performance(train_ds)
# val_ds = configure_for_performance(val_ds)
base_model = ResNet50(
    include_top=False, pooling="avg", weights="imagenet", input_shape=(512, 512, 3)
)
base_model.trainable = False  # Freeze weights
early_stop = keras.callbacks.EarlyStopping(patience=4, verbose=1, start_from_epoch=5)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss", factor=0.2, patience=3, verbose=1, min_lr=1e-5
)
inputs = tf.keras.Input(shape=(512, 512, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = Dropout(0.4, seed=seed_val)(x)
# x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = Dropout(0.3)(x)
outputs = Dense(4, activation="softmax")(x)
model = tf.keras.Model(inputs, outputs)
model.summary()
model.compile(
    optimizer=tf.keras.optimizers.Adamax(learning_rate=1e-4),
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=["accuracy"],
)
history = model.fit(
    x=train_ds,
    validation_data=val_ds,
    epochs=20,
    batch_size=batch_size,
    verbose=2,
    callbacks=[early_stop, reduce_lr],
)
plot_training(history)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/801/129801519.ipynb
chest-ctscan-images
mohamedhanyyy
[{"Id": 129801519, "ScriptId": 38509595, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9777372, "CreationDate": "05/16/2023 14:58:52", "VersionNumber": 1.0, "Title": "Chest CT-Scan ResNet50 Classification", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186172482, "KernelVersionId": 129801519, "SourceDatasetVersionId": 1432479}]
[{"Id": 1432479, "DatasetId": 839140, "DatasourceVersionId": 1465871, "CreatorUserId": 3607160, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "08/20/2020 18:46:59", "VersionNumber": 1.0, "Title": "Chest CT-Scan images Dataset", "Slug": "chest-ctscan-images", "Subtitle": "CT-Scan images with different types of chest cancer", "Description": "### Data Story\nIt was a project about chest cancer detection using machine learning and deep leaning (CNN) .\nwe classify and diagnose if the patient have cancer or not using AI model .\nWe give them the information about the type of cancer and the way of treatment. \nwe tried to collect all data we need to make the model classify the images easily.\nso i had to fetch data from many resources to start the project .\nI researched a lot to collect all the data from many resources and cleaned it for the CNN .\n\n### Data\nImages are not in dcm format, the images are in jpg or png to fit the model \nData contain 3 chest cancer types which are Adenocarcinoma,Large cell carcinoma, Squamous cell carcinoma , and 1 folder for the normal cell \nData folder is the main folder that contain all the step folders \ninside Data folder are test , train , valid\n\n**test** represent testing set\n**train** represent training set\n**valid** represent validation set\ntraining set is **70%**\ntesting set is **20%**\nvalidation set is **10%**\n\n1. **Adenocarcinoma**\n\nAdenocarcinoma of the lung: Lung adenocarcinoma is the most common form of lung cancer\naccounting for 30 percent of all cases overall and about 40 percent \nof all non-small cell lung cancer occurrences. Adenocarcinomas are \nfound in several common cancers, including breast, prostate and colorectal.\nAdenocarcinomas of the lung are found in the outer region of the lung\nin glands that secrete mucus and help us breathe. \nSymptoms include coughing, hoarseness, weight loss and weakness.\n\n\n2. **Large cell carcinoma**\n\nLarge-cell undifferentiated carcinoma: Large-cell undifferentiated carcinoma lung cancer grows and spreads quickly and can \nbe found anywhere in the lung. This type of lung cancer usually accounts for 10\nto 15 percent of all cases of NSCLC. \nLarge-cell undifferentiated carcinoma tends to grow and spread quickly.\n\n\n3. **Squamous cell carcinoma**\n \nSquamous cell: This type of lung cancer is found centrally in the lung, \nwhere the larger bronchi join the trachea to the lung, \nor in one of the main airway branches. \nSquamous cell lung cancer is responsible for about 30 percent of all non-small \ncell lung cancers, and is generally linked to smoking.\n\nAnd the last folder is the normal CT-Scan images \n\n### Acknowledgements\n\nWe wouldn't be here without the help of others and the resources we found. \nthanks for all of my team and the people who supported us\n\n\n### Inspiration\n\nI want to hear all your feedback", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 839140, "CreatorUserId": 3607160, "OwnerUserId": 3607160.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1432479.0, "CurrentDatasourceVersionId": 1465871.0, "ForumId": 854336, "Type": 2, "CreationDate": "08/20/2020 18:46:59", "LastActivityDate": "08/20/2020", "TotalViews": 118265, "TotalDownloads": 14230, "TotalVotes": 248, "TotalKernels": 56}]
[{"Id": 3607160, "UserName": "mohamedhanyyy", "DisplayName": "Mohamed Hany", "RegisterDate": "08/21/2019", "PerformanceTier": 3}]
import numpy as np
from numpy.random import seed
import pandas as pd
import os
import matplotlib.pyplot as plt
import warnings

warnings.simplefilter(action="ignore", category=FutureWarning)
import glob
import seaborn as sns

# Tensorflow Packages
import tensorflow as tf
from tensorflow import keras
from tensorflow.random import set_seed
from keras.models import Sequential
from keras.layers import (
    Dense,
    Dropout,
    Flatten,
    BatchNormalization,
    GlobalAveragePooling2D,
)
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet import preprocess_input

print("TensorFlow version:", tf.__version__)
print("Keras version:", keras.__version__)
import logging

logging.getLogger("tensorflow").setLevel(logging.ERROR)
seed_val = 42  # Seed for reproducible results
seed(seed_val)  # numpy's seed
set_seed(seed_val)  # tensorflow's seed
AUTOTUNE = tf.data.experimental.AUTOTUNE
gpus = tf.config.experimental.list_physical_devices("GPU")
print("Number of GPUs:", len(gpus))
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], enable=True)
    details = tf.config.experimental.get_device_details(gpus[0])
    print("GPU Details:", details.get("device_name", "Unknown GPU"))
data_dir = "/kaggle/input/chest-ctscan-images/Data/train"  # Train directory
class_names = [class_name for class_name in os.listdir(data_dir)]
print("Number of unique classes:", len(class_names))
total_images = glob.glob(data_dir + "/*/*")
print(f"There are {len(total_images)} images for training in the dataset.")
os.chdir("/kaggle/input/chest-ctscan-images/Data")
batch_size = 32
img_size = (512, 512)
train_ds = tf.keras.utils.image_dataset_from_directory(
    "train",
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=True,
)
val_ds = tf.keras.utils.image_dataset_from_directory(
    "test",  # Use the `test` split for validation because it holds more images
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=False,
)
test_ds = tf.keras.utils.image_dataset_from_directory(
    "valid",
    batch_size=batch_size,
    image_size=img_size,
    seed=0,
    label_mode="categorical",
    shuffle=False,
)
class_names = train_ds.class_names
print(class_names)
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break


def min_max(ds):
    img_batch, labels_batch = next(iter(ds))
    print(
        "Minimum pixel value:",
        np.min(img_batch[0]),
        "\nMaximum pixel value:",
        np.max(img_batch[0]),
    )


min_max(train_ds)
# The RGB channel values are in the [0, 255] range. This is not ideal for a neural network; in general we should seek to make our input values small. But here we use `tf.keras.applications.ResNet50` together with `tf.keras.applications.resnet.preprocess_input`, which expects float tensors of raw pixels in the [0, 255] range and performs its own normalization. Therefore, there is no need to standardize values to the [0, 1] range with `tf.keras.layers.Rescaling`; we simply apply `preprocess_input` inside the model.
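# As a quick sanity check (a minimal sketch added here, not part of the original pipeline), we can confirm that `preprocess_input` accepts raw [0, 255] pixels and re-centres them itself, so no manual rescaling layer is needed before the backbone.
sample_batch, _ = next(iter(train_ds))
preprocessed = preprocess_input(sample_batch)  # returns a new tensor; the batch itself is untouched
print("Raw pixel range:         ", float(tf.reduce_min(sample_batch)), "to", float(tf.reduce_max(sample_batch)))
print("Preprocessed pixel range:", float(tf.reduce_min(preprocessed)), "to", float(tf.reduce_max(preprocessed)))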
""" # Define needed variables tr_acc = hist.history["accuracy"] tr_loss = hist.history["loss"] val_acc = hist.history["val_accuracy"] val_loss = hist.history["val_loss"] index_loss = np.argmin(val_loss) val_lowest = val_loss[index_loss] index_acc = np.argmax(val_acc) acc_highest = val_acc[index_acc] Epochs = [i + 1 for i in range(len(tr_acc))] loss_label = f"best epoch= {str(index_loss + 1)}" acc_label = f"best epoch= {str(index_acc + 1)}" # Plot training history plt.figure(figsize=(20, 8)) plt.subplot(1, 2, 1) plt.plot(Epochs, tr_loss, "r", label="Training loss") plt.plot(Epochs, val_loss, "g", label="Validation loss") plt.scatter(index_loss + 1, val_lowest, s=150, c="blue", label=loss_label) plt.title("Training and Validation Loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.legend() plt.subplot(1, 2, 2) plt.plot(Epochs, tr_acc, "r", label="Training Accuracy") plt.plot(Epochs, val_acc, "g", label="Validation Accuracy") plt.scatter(index_acc + 1, acc_highest, s=150, c="blue", label=acc_label) plt.title("Training and Validation Accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend() plt.tight_layout sns.despine() data_augmentation = tf.keras.Sequential( [ keras.layers.RandomFlip("horizontal_and_vertical"), keras.layers.RandomRotation(0.2), ] ) for image, _ in train_ds.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255.0) plt.axis("off") # train_ds = configure_for_performance(train_ds) # val_ds = configure_for_performance(val_ds) base_model = ResNet50( include_top=False, pooling="avg", weights="imagenet", input_shape=(512, 512, 3) ) base_model.trainable = False # Freeze weights early_stop = keras.callbacks.EarlyStopping(patience=4, verbose=1, start_from_epoch=5) reduce_lr = keras.callbacks.ReduceLROnPlateau( monitor="val_loss", factor=0.2, patience=3, verbose=1, min_lr=1e-5 ) inputs = tf.keras.Input(shape=(512, 512, 3)) x = data_augmentation(inputs) x = preprocess_input(x) x = base_model(x, training=False) x = Dropout(0.4, seed=seed_val)(x) # x = GlobalAveragePooling2D()(x) x = Flatten()(x) x = Dropout(0.3)(x) outputs = Dense(4, activation="softmax")(x) model = tf.keras.Model(inputs, outputs) model.summary() model.compile( optimizer=tf.keras.optimizers.Adamax(learning_rate=1e-4), loss=tf.keras.losses.CategoricalCrossentropy(), metrics=["accuracy"], ) history = model.fit( x=train_ds, validation_data=val_ds, epochs=20, batch_size=batch_size, verbose=2, callbacks=[early_stop, reduce_lr], ) plot_training(history)
false
0
1,992
0
2,657
1,992
129801682
# ![Diamond Price Prediction.png]
/AWKOL19zRvr8+7nWZG3p1kpgIdgCKtqW+rXNSEK4CtC5CgI40LapBsU8WkIogC2G1EAu8PQfSvfr3D33/9tRL/06yd9z/uOD+LFf/HT8P/4edwKA0WvZGobHhVdMYHz0h10DgK/+xr8zVdAmuFQ/8YA6I0b0Ae+pe/sH80p7dZ87h0kf/NnYU501+194uMI/rO/OnhsYFf6ICnH8+i7HxuyHH24hW6+Z2ZoVcTum0wmUFRtXOTYRBEyA5Q93J5FyBjG8UPP49J5E56HyM8tRJ52I/Jzg6a3oja6XENzy7X6+zYFcf29rxXCIMB8OilDexmAFNIsRZoaZIZBSoFQWBAToZxvFHk2pXvpuuvn3RG18WKN9W+JR/2GXKs80fSd5hBuqqv0hlAADxKnuHlwokekz38eogC+MkSWgjAepD26gSiA3UDao1sMJc/4134bt//t/wxero7+jnrzJW5+5j9C+Il/a5A8PUar++c4Qf61r8N88zXyP/oGsj/8Jsw33i3fvwv+xmlWzVsWM9Bbb4De8yboPW9Avf1m8f69b4G+/QOg+bSPInVyysIw/+XfRPK3fg786vb4H3jvmwj/y5+B+r4PPSF3x+PSAlfKcjpDj0fnsgbuUsYejPFXe1/FMp/4HqbTEL6nURn8buIYcZIWbuuLLw6yoXcqz5GdS23l2hDZuYPI0h1ElvZxijVw/Zrp/M7D7+6ti7hQ3GpSmM+n8H0NIkCV+zppmiPOMhhuV1Tv6ljN1TTGvytkk4JOEW3l0Te9xugd2PrVBgXwkOlKf/58jlIAA7Ix6goiR0EYF9Im3aAIFyOTEtuR9ugOQ8jy7n/437D5uX909P00DbH4j/99TH/qL4ICv/f8nELymX+J9HPvIP29zyP+1O8Dq81lMvLyBurbPwD6jg/A+9B3gj7yPaA3X/T6EyctEJMUyd/9BWT/5/970m94P/kJBH/9L5+Ys9NwaaHrSllEAXwcz4kPXP+7qUCe+D4WsymUKjYxDBvcLzfI8mLj9WFc4fPzXNm50lauHZGj/YgM3UFkaTddsX6bNOMDN79bzSuAykUvgTmHIUCDYLIcnq+xmIYIAr86XgYQIU1zrOMEQOE2mIi2h9SK9I1V9WzUCrrS5XOVZl/ulIGHdWkI1899sUu3NDUZs8wGTnPIdK8JUQBfGSJHQRgX0ibdwIABrlsBKwDmgjkSnoq0STfoU47m1S1e/1f/HdLPfv7o7wR/8iN4+f+z9+Zxkhvl/f+nSld3z8zOHl7b6xN71/jCNvhrG4NtCOFwEgIE8gPClZAESMg34QjkgGAuxw4JBALG8E2IwxXCEbABY8DmML7wgTH42F2vvbu+vffMztXduqp+f+hodbekVqulHklTb16Nd6RSVUmPSirVp56n3vlG0DWrM6tHUvjCIoxf3w9jy4Mw7t8Ba8ejY6/DMJC1q0FOPA7yKSeAnnUqyOrp0fMc8iOR79qH9uVfBt/+SOJj6MmboL3rT0EmG8NWLzFV+tityrmM4zzG9R5a7nWBw9JGeekQQvwBsEZNg6YpINxZa88wbbR1HaYrBJdZAA6jKm1npSPsWH6EDauDsGV56J0Q1iXo9qQbdKy3PnBvej8Nd8Q2KpGAEAwn2gjnsCwThmmDIdjXYL6HKi/BbVV0gY4GzJiHWJtlnsF7KI+Q0gAHJeURaot+b610hAC8whB2FAiKh2iX5ccRgOH0+v11YYRdy4hoj9UhC1ua9+/A7D9+Bnx+IVF6MjWJ6be8Btpzzx657KTwtg7jvm3Qf30/zHvuh/XwcGvbFg3ylCNBzzgF8v85BfSE40bLa8gPRvu6m6H/z3eBtp7sgPVrUH/PW0GOOCxF7ZJTlQ/fqpwHkM+5jPv9UwQxOy6kY+igLSEAZ6gpChqNmj8wxjjQauswTBOcez0w4nTJ3EHiLL1IkpzHyHkG/h1W4yq1p5WKsGGxSfq8EHasBsKOxaZvSZuIkMxxIaQ7+9x0zhoSgbwAxmxQSgAOqLKERl2Dosj+e5hzwDBN6JYNEAIaGPbh6AjJwX6HV7flIO/QxFnnSbjfg+uq96jX0Ts+S6E2LwGYww1PTpFp+Ouy3Qt557vSEALwCkTYUiAoFqJNVoPOLNTlXX9OMBqiPVaHUW3ZvOo6LHz+m4nTa+efhek/fw3IqqmRyk0Cb7bRvvEOtK6/DebW7bmXt2xMTUJ6xsmQzjoN0uknAZo61OFpPhj5zEHol38ZbHPC66oq0N7+Rkj/52lDlzUMVfn4FecRzXK8f4oU1jrOG7j3b8YYKKWoazIa9bo/PGbbNpaabRi2DSrJsC0Lsiz7g15h4nIW5C0Ah+HVviptaqUi7FcdhC3Lj7Bh8fGF3ARhoXuPi9oW3OWvK8wASSLgzIYiS5hsNKDIzjgPIQQ2Z7AsG6ZhwQaHJEmhy1cUhbKIc5xzUBDQnCK65BOmOePwz7DdyDckl0XmynIv5JXnSiWxAAyIQdGqIOwoEBQL0SargRCAq4Foj9UhrS15W8fcxz4H/Y57EqUnq6Yw/Y4/gnbW6anKGwbzvm1oXncz9Ft/Be6uRbWSkJ55BuTnnAN65qmJj0n74Wh958cwvnp14vTq618G+Xd/M1VZSanKR7A4j3CW8/2T52BQ0Otm0MDoMCKw929Zkpyw0Krs79MNC622DhYoM08vnCxtN0pOea0TJxgvwobVQdiyOghbFgO/PxGxvffvXrtFh4ru3Qgw3lnSixICRZFQ1zTUVLmrb2NaFgzbdiOVAHCFuyKJwWUR5wh3LmHWIZX9/DMWgHPxqCXu+tI5CMBC/F25DCUAA2JgtCoIOwoExUK0yWrgiMBCAC47oj1WgzR25AuLmPmHjycOo6yddyam//KPQCbqQ5eVuE4H59H84Y1o/eTnsPfsz62cMkEm6qAXnAXlt54Lctghg9On/IBkOx9D+xNXAPtmE6WXLjgb2v99faqyhqEqH8RVOI+szqEI752iegOHDeoGB4AJIeCMoa6paNQ1SO6gIeMczXYbpsmcVToCQnBRxPtxWF2IwuVH2K9aCHtWA2HH5aerrxCyvfd9P8hD1xOCubsWMAjxl/oioGDchkwIFC80tCz7JTNwmIYNi9nuUhTFEX+Bkgh0HJAyFlTz8NLtnVyQeRhssFzyzSvPvPItUvupAkIAXqEIOwoExUK0yWrgdPZFR6XsiPZYDYa1I5uZw8x7/gX2rn2J0k+98ffReMWFaaqWrD77ZrD4v99H6yc/B0wrn0I4BycEhHtCCvx/xz3KCOAe0Fk+y0vvxUIg3s6cH4nSM06BfOEFoGecHJsu9UdkW4f+/74K+7ZfJUpOT9mE2rvfDDRq6cpLQFU+iMV5dCjKe6cI6wKHpQkbtPVCQNq27YReZAzgHBOTDdRVBeAMhFDYjGGxZcCyrMJ5/4Yd6fkb5TWd0AkrKCg7VXl+CoQtq4Cw4fIRFHu9SWG9+6L+jk/XvcYw4QAnFIzZIOCglLqhoScgSwDnzBGMGYdtM1i2BXuZ1/8NUgaBjnBkHvo5bwE4j+tKST7fBGUSf/PMd6UiBOAVirCjQFA
8RLssP95sTwfhCVxWRFusFknsae/Zj5n3fBRs/2BPTzJRx+r3vBXq6SdlUb3+ujyxB4tf/x7aP7s9u0xdocQZzuAAd0OTjfG7isB5PnLuisNZl33EYdBecSHoeWcOrkuKD0rrJ7fC+NzXEqUlGw5F/b1vBdavHbqcKHrFryp9FFflXNKeR9HeOUX1BI7625mE0hn45ZxBkSRM1GtOWGjOYXPAtGy0WrozIEuIv+jfqF7BSe2X1MpeuiyskDTstfASLgeD7lPfGynkZhvn+14wOqI9lhthv/ETFhp6kODbiQzCAJCA8NvzJg78yd2Zr26vAwSApjh9DlmS/MSMMZiWBYsxcAaAEuc5zLj7HRQsJ18KL9C5YZ+9vkge0XXKIABzMFAQ5PX4KLqwHkQ8Q7PHE4Dv3fkY9swuCAF4pSDsKBAUD9Euq0Hn40MIwGVGtMfqMMiW1qO7MPPej4HPLwzMSz5mA9a8/+2gh2Yn7Pn12PEoFr7xPRi3/nrkvDofZUDuLrgj4a25ld3HLjnqMCiv/B1I55wRnSat0PPok2h//Arw3QlCca+aRP19/xfkmCNSlZWUqnwgr+TzKOr7pgjewHH7e8NE8+B2zkEJgaYqflhoTgBmc7R1Hbph+YKx50GcR/270qYqYbwELV6VNll1wu6rMMsJe5YXYbtqIeyZL8H+QO/2ZNu87dz/25so5uHZ0BeQGQPAUddU1DQNskzdML4UjHMYhgmbMWcLpWDMiVAyjp5BKcQ57rhOZC6o5iXU5iYA264AXHDBvoR5CjoC8H07H8PuJAIwUNyPVMFwCDsKBMVCtMlqIATg6iDaZDWIs6O1/RHMXPRx8KXWwHzUZz0dq//6TSCammX1wOfmMX/FN9H+2W2j5+V+QJfT08fxTOborLs1CuSYI6D+8f8HetLx4fvT5q8baF/+ZbA77hmcVlNR+4e/AH3qcenKSkCVPpKrci7DnEfR3zPL7Q2cVCAOegFzboMQqWv/VL2GWl11PG/cgdy5pTYsy4IkSe5gbLbifbEtOxhakfZYdZLeZ8LLuxoIG5YfYcP8cfoCyQTfsO1dYaVDBOAwCCFOXwJwJp/VNCiyNx7EwDmBYZkwLNsVf8dDKQS6nARgj6zzzM37lTAQXgKP7RLmKRAC8IpG2FEgKBaiTVYHZ+apEIDLjmiT1SDKjua92zD74cvAdWNgHlNveDkar/ztrKuG5v/+AIvfuCZRHaLgACgvq+gbDfHOyQvXmhL67DOhvuFlIKun+8sYIV/ja9+D9e0fDU6oKKj9zZtAcwoZ7lGFj+UqnEOQJOdT9PfMuG0yzIBt9H4OxvypJG60Zycs9GSjDkWWHM9fxmGaFprtNpibRZbifbEtmw7v6lStrZaFrO8pIfKXF9EGq4ewafb0hoUOCneD+hvd/45OF9zmBHd2opBIkgRVkdHQVMhyJzS0zRlMywa3ORhjzrfOiEtRxJH5Or053adShp6vvctfFFlQZYz5eRHKQXOK3JW1B3RZxHpBB08AvmfnY9grBOCVh7ClQFAsRJusBkIArg6iTVaDXjsad23G7Ac/mejY1X/7Fmjnn5VpffRb7sL8578Btncm1fEdTdRbq6raeDpw6lNVZCiv/B3IL/nN8PxTfmzat9wF/bIvJkqrveOPIZ379FTlJKFKH8xlPpdhB53K8o5ZbiF4mPWCg4OojmcvB+MUlBIw20ZN0zBR10CJ67HDCVqtFgzb7hvkTVrHclhxdMIGqEXY6PGR930mvISrgbBhuRi0VruwZzYwzkPDOHv4+4gjyHFGevaRrn6Gvw+d92AndDQBBwXnNiRKUFcV1GoaFIk6aQiBzWx3nWA71SS0QRRZ9OzLE4CUoVd0nuvTZi1+dtWVcj9STZaU6l4Qz7vc4BwwLSEAr1iELQWCYiHaZHVgVXPHW6GINlkNgnY07rkfsx/6FGBasccQTcWaD7wNytOemlk97N37MfepL8K8b1uKozk4JyBkZYi+oXAAxLsOwx9Ojj8a2l++AWTDof370q4L/ODDaP3zvwOLzYFptb/7M0jPOCVVOUmpyodzWc8jbCA36lzK9n5ZDpt4nhFe+Um8gbtt4HrcMOe5SUhH5GrUVNQ01UnHbFicY6mlw7KYu6YfQGi4TaPWGKw6ST2U+oRE3v/W4l7CFY53H4VenwIgROFqIGxYXAYJwEGEHdPjLRXhwYLXveud3nkqh3kBeyIvDz68ncy7JpHxwP9xUFDCUFNVTDbqoNSRiAEOxjl004K7jDA4ccaRCGEgICCuZ7H3kkj8Hi6D6OeKnbQEa/SGTR7IJG9/4oFrV+6tD50dpbgXcsxT4MA5YFhOCOg9SQVg58CidEkFoyJsKRAUB9Eeq4MQgKuBaJPVwLOjuWU7Zi76+GDxd9UU1n34HZCOPzqzOjSvvBaL//NdcMMc6jjOOSghhRkMLhJesLVhUV770lBv4NQi8O79aH/4MvCZg/HpZBn1974V9JRNqcoZhqp8QFfhPKoiAAPL7wk8aHtUGl+wDYaU5wyqLOY6x70AACAASURBVKGmaVAVxd1MoBsmWm0dNveCOXbWDQY6QnT5rLd8EJLmSb2yKfL9JUThaiBsWH6EDdPRGxo6bF/UtqjQ0HHHMcZAKQXnDJQAmqKiXtecJSlc4ZhxDmYzmLYFO5CVIwb2Tm6LpyyCH3Enh1GareCZR4jiPMNKE/DAxGYGZBhJsEwirXie5Y9uWtg8zBrAHmX8aBX0I+woEBQL0SarAfPjlYqOTJkR7bE6mNt2YuYf/nXgervS4Ydg7T++G/TQtZmUaz38BOY+cQWshx4f/uAV7OybFAKApRCCyfFHQ3vbG0EOW9e9Pa0IfHAO7Us+C/7YrviEqoLaB94GuvGYVOUMQ5k/pPNcX2q56PUgLTN526V3oC2tEBw2aOs77XiiLueoa443sESJGxaao902oJumH5rRGbzlvteQYHS8u6hK7TwJ/hr3Acp2V3VVXwj8hSTpPdVry75tglKx0p6no8AR3o/wJLgo4Te4zY/6DHR56fblGVjr1fMirtdqmKipkCUJnDvRRxhzRGDLdvobjvcw/EKSeACXRfSjgOPlXJL65pF3t/ibLWW6ruK5NR6EALzCEXYUCIqFaJPVoDPQKNYCLjOiPVYD66HHceBvPwLe1mPTyRuPwdoPvR1k1VQm5S588Uo0v/XDoY7hHKCkfIPBy00n2OsQKDLU1/8epBed38lnlA/QVhvtj/w72LadscnIRB3ah98BeuTh6ctKSFk/qIMDXGU9hzAGCZplYTlsMsgrJw5nLWDSJQL7kwzcNAQcE/UaNE0F3H2WzdDWDeimAc4JKKVCAM6Q3ruoSm09KVW5m1aqmF9kRrm3qLBjJRDtMRm9fQMgftmJvrWAecAbmPT3TTwPYEIIbNe9l1KA2TYIAFVVMDXVgOzbyxF/LYvDsm3YzHYm2rh1irNrmQQ6KWPxN6+1f7P9Jgl+sbLcImqU6T7IM19BN0IAXuEIOwoExUO0yyrAwDkRAnAFEO2x3LADs9j/zkvAZudi08knHo+1F78TpKZlUu
bshy6D9fBwXr+pRExBNym8punpJ0H7yzcAUxP+tlE+RNuXfhbsnvvjEx2yBo1L3gVMZzPZII6qfFRX4TySrGNbFpbLHmm8gbvW/nNDDtpuSEUaGFTlABRJQqOmQFUUcPeBYhgW2roOiznxBgT54w+2LnM98qAaT4BuwkSJ4F9VeH6XjZHus0C4fCc6vvDyrgKiHcbjePR6a7KSvn1df8N2hV+3bXDHczgsz+B/CQg4Z27+bp+QcHDCMaHWUNcUKLK3zznOsm2YNgPjng3DW3dZvF49JEJSLeUThXeN8wopDWQsABPm90kzD69dIqFWPJfGhxCABf6LTjQ8gaAYiOdrdXBCB4pna9kRbbKc8LaOA+++FNYjT8amkzceg7WXvhukXhu5TGvbTsx86DLwxaXkBwXXpRSMjBMWergnL1k7DfXdbwI9rnvd51R9Y9NE+8OXgz34UHyZR21A/eJ3AvXRJx0Moip9/KqcR5VYTpuk8QqOO8YXgjmHqqpo1DXIlIC73jutto6WbsJiHJCcAUbiehcL8qfoonBYaGcP0Yt0EJ7C+TDu+0vYsToIG4bjTQoDXM9gABI6oZzD0nf9zbz3QUfwG7i+MADOGBp1FRO1GiSJgnIGTpxPRdNivhDs1cqJZELB0QkxnSW5hH7m2XvpAvmt/ZuV+Buco+ytBy2EWvEMGhccgGFY2LzzCew5OCcE4JWKsKVAUCxEm6wOQgCuBqJNlpOZ930cxt1bY9PIxx6BtR/5O5CJ+sjlta69CfOXfzlxerHEb864gzfDfFiqb341pN98lv936o/Slo7WB/4N/NH4yQf0xONR+9Db05UxBFX6uK7SuVSFIonASd7XfeEZA3975+KEbATqmoaGpvlzdBjnWFhqwbSckIxiEvX4KaP4JHqR/Wt7e/8ujxWLy3LdX2G2K1O7FHQQdusnGBoaCfoaXdsGeASHbfNCRXO3/6HIChp1DaoiewkBQmDZFgzDcia7up7EgDOXOOs+SZkEYI9y5Mvd9X/LUNf88swzX0E3HI4H8JYdT2D3sAIwIAZEq4Kwo0BQLESbrA6cQ4QLrACiTZaP+f/4KppX/zQ2jfyUI7H2kneDBEL/pi7v8i+jde1NI+cjyB4OPlSIMel550J9yx/4f6f+MJ1fROsDnwTftTe+vN84F9qfvyZdGUNSlY/sqpxH2cnD2yJtPZJsC0vDA//2CJ6PbdtQZAmNmgZVlkEdlw0YpoVWS4dp230TTYQoPH78qx0UF5elJkLsHRYhIg5P0e+x3vYorFl8gveUF/YbEe/FlUbfmr8926O28Z5/8Ih0QXwPY0Kc8MCEQ1UUTNQ0qIrk9y8YOCyLwbRtMDcSSdG9f4l7yjTjugb7onn1v7LK07O7lOMqcUU+/7zzFMTTCQG9KATglY6wp0BQHER7rAbcXb9FUH5EmywP+h13Y/biT8emoetW45BPXgSyarQ1WPliE7P/+GmYW7YnPcJZ/2mkUgXD4gRgS+5zTc84Gdpf/wmgKt35DPmxymcOovX3HwPmF2LTaW/7I0jPPnOovNNSpQ/uKp1LVRinTXoH+tJ4AwPdHsBh+XPOwZgNTVUwWa+7g5fOubZ1A01dB2OdwV1CKIov0VSb3rtwHPelsHg2xFlqJT7zi3Zf9QmFA+hLI7y+C0nS+6xIk23GTdcawSH7Io9jAU/ibl2961jPA9j7N9CJksBsBq2mYqKmQZEl5/oTDnDAthls24Zt2wAcIZRlYJRMPYkDa91mLf4ih3yDZC3+EsJBC17XceS7Et/nywoHdNvCvTsew96ZJQw9B0EYTCAQCPJBPF8FAoFgeOxd+3Dwo5+LTUM0FWs+/M6RxV82exAH/uafkom//oCXEH+XA/eTO/GEHHb3VrTf/wlgfrE7nyEngpC1q1F/318MXOdX/3//A75r31B5p0VMZhFkTdyg6Djp7TsnHRCkAUGiazDR/ZtTAirJMC2O2bkFtNpteE8VTVUwPTkJTdNWzCB4GWCcg7ke3tz9e9x36PK3iHLCI36AeH8VBU6S3999thQ2LDUr2Y5xfYrYvgYJ/NClnXflS6nk96cooU7fhAOMAaAUbdPEgYUFzC4swjBMP2tZotBUBZqqQKIEPGKdYkExCPQ4UYaeghB/q8nQHsAeK/HhX0WEHQWCYiHaZDXorB+TY6wXQe6I9lgO9r31ItiP745Ns+bD74D69FNGKsfetQ8z7/0Y2IHZgWk5ISDi/ikUicNCr1+L2nvfCnL4en9Tmo9WtnU72h+6LDYNOepw1P/p3YCixKbLiip9fFfpXKrCcttk2LDQoaGge/Z5MMYgUYKGpkLTVN+TgzGGpZaOlmGDkOB6p55ATgLbBEUg7XrC4o2+/AQtVso2FROUZCXeX6W3Z4nI8/7ybLcSLBj03AX6l4KISg8E+xzelmBfYcBx3FlgTFVkNGoqFEX2+yGcc9i2Dd22QCABnLgTNpj73cO78k0laA9B8DFX1rV/s8qbg/niP82hhZRJqBXP+PHDOYdp2bhn5xPYM7MgRqYFAoFAIBAIBOVk7rIvDhR/p9/+xpHFX2vHozjwrkuTib/glRd/Pc+qOVPH067/Kk67/quYM/XlrlYsFASJ/MH2zaB90SfAdj7mb0ozGYSevAnaX/1RbBr++G7oX7xq6LzTUqVJLVU6l6qw3DbxBhmDg0xh23zcAVXPGzhsaMr3zKEUNuNYaDYxt7gEw+qsAzw1UceaqQZkSep4EHMAkpTLeQpGo8tDOPATFJ+gV6nv8V0mGwYeMuXwA8uXoNc+K5MdBV14dutrk8tdsRzoeO5Sv18RF5I42AfxnYL9JLwvTeRxxIkm1dINHFxYwuzBBRim5fdDZEXGRK0OTZVAwMBs0xWI++uTN14/KO/y8hQ/s8q7EwusPEKtoLoIAVggEAgEAoEgBNGxLjbtm+9E67qbY9PUX3Q+6i84byRbmvduw8zf/wv44tLgxDyfj7xx4a2LaXOOJcuExZ0Z7geMNg4YbQDAgmXgifYiDGZjj97EgtHGvNHGw815AIDB7OWp/ACcGekJbbPYhP6hT8H+1ZbO8SkGJqXzzoT8kufHprF/fAvsO+4eKl+BgxgoFmRFlxdPz3ZvTWBKKaikwGTA3Pwi5peW/GemIlNMT9bRcNfq4wC4nf8AqGA0wgTFYBjp3nDEguLRa8OgEBW0ZZHgPf8uWv3GRa/oFRSDWYVFxLxZ7udXMGR02DO1CgTv3TRLURDSHxY66livDwIAsiyDExkmA2YXmtg/O4emYfpetxQE9ZqKiXodFADn3d9kUf3mPPopErL3/B1Hvz8Tz1/3+UXc/5UF4f1bbUQIaIGwpUBQIER7rA7OR6uYZ1UFRLssHmz2IPb9+UXgzXZkGvnoDVj3bxeBqE543TR2NH69BbPv/7dkiWNC+42LOVPHvGXg6Hr3Wsde1XRmw+YcDUmGwWzMWwYmJAV1ScaT7SUQABtqE2jZFpq2hVWyCoVSNG0LMiFQqeSXM61o0JmNx1oLmDcNnDS5Bg1ZwZypY840cExjCjZnkEi5n4PaX/4h6Hln+n+n+
Yhtf/BTYPfviE5Q01D/1/eCrFudpoqpqdIHeZXOpQoUxR5Dh4WOEByC3ixeCEVncI+jrmmoaaqzDh8A22Zo6QYMw0TvqnxFuS6C4RG2Kze+9YJedmOug/iaSEeYnUR77FC2+6rLcqRM8lh4CGV/6a8h+hteX6N7t/dHt2dxXFmEEDBmQ1EUTNRrUGUZUuCzy7Y5LNuCzZiTe0i7ybotUd7JNw8BmNLsvivjvLfTQQEwJ1/C/QgzeVCW8M/iWb18iBDQgj5EgxQIioNoj9XBWwNOIBBkz+y/fC5W/CWaijUX/ZUv/qbBeuBhHLzkM4MTUve5XYDH94t/8X2cf+M38c0nHgQAtJmNR5oLWLJMAMCSZaJtWwCcsKd1SfZF3SNqE9hQmwAA1CUZ69QaFPcjuxFIdzAQ6lmjEjY2pnF0fQp1ScaCZWBa0XB0fRIAYDKGR5rz0AvpFcyRZE6A/ukvgd15b+eoFGEKtXe/CWT1dHSCto72x/8rcX5ZUfZwi2Wue9Upim3iQiuGwTkHetoFA0CoNyBrg1AOxhgYAJtQLLbamJ1fhG4YAABZophq1LBm1SQURe4qtyjXRRBP2LMx+OwvVdhhQUdsQcC2aW05RLJBXpji7klG2LUMCwFeufYYczpl9aj1vIJ72+NA+xXkRMP6DmFhoeOO80Rk5zesN7DzC4qgnFIYto3ZuQXMzs2hpevgnAEckCQCTVVQ01QokgTOWNezD+i/tFm0pbzW/fX6UUVt6526efXLp55ZXttxhOsWFIPUArC4OQQCgSAfxPO1QpBidk4FgjLT/O6PYd73QGya6Xe8EdKG9anLsJ/Yg5kPfAJcN+ITUgLYy9/OOecwmI0JyREbVikqAEegPaYxhUnZEcLXqjWsVWsAAJlQTEgKpATvHJMx7Gk3AQDTsoppRfP3EUKwXquDEIIJScGedtN/j9UkGcc2VkFy1fFHmvMFWiuY+Nr9IPR/vQJswD0XW9JkA7V3/UlsGr7jEZhfvyZ1GSuR3gE1QbEoo028AVwauLco4IT3JwSESACnoJRCIgSSewwHMN9s4+DCIgzTdvMimJ6oY9VEHRKl4KxzPcp4bVYSwwxeF3kwWuAQZssihPgWX/yj02s/0RaLTdiztastllzQ94TgqH1dfwd+nf2hSnDIUR0RWAJxwi1TCpsTzC22MDO3hMVmC163gwBQZAkTNQ2KJEPyBGhK4MU9yd4btvhkf66O9y+hzkTrPF4uZbJNmeq6EkgdAhoQL9cqIWxZffKwsXig54dok9WAgQNcBNsoO6I9Fgf78d3Y99aLYtPUXvBsrH77H4fuS2JLtn8WB951KdjsXGw6TimIzZZ9BM/mDI+3FnFkbRIypbhjdjeOa0xjvVaHyZjvxZsGizHIlGLO1Lu8hXuZNw1okgQtYn8QndnQqIS9uiMoH6o1UtcvE7j7fwn6NNoH3gZ60vFd24bpC5lXXTdQ5K1d+m7Q449OnGdWVKVPV5XzqBJFs0mSMI1p1p7s5MGhyQpqNQ2qTP182rqJtm7AYsx5bRTsugiypWj3vWA4ekNGD2tN8eWw/NAKtcHu4MAr6/7qFkjLRVDMjpu0yMO2df0dHnY6CGMssDyFexQhqGuq4wEsS/AWKGNuaFjmRkNiIfVLe72lQF3LtKZsVmv/AgCl+Q0PFPXcx5mvIBmZhoAWxhQIisc4w1NFlVXWn0CQNXS5lSGBoGLMfvLzsfvpurWY/rPXps6fLyxi5j0fHSj+glJQtrzirxfOmXPg2MYqyK7Qe+LkWl/0nbcMLFgDvJgjWLAMHDCcMNvTihYp/gLOZJeWG1oacETpJ1qLoWk9kfhQrYEpWfXPZU+7CYv1rpg5BghAQJCkG2B87HPgew90bRumD6G8/EUDxV39M19JlFfWVKUvVJXzqBJFs0l4WMXuUNGeF3CUgBDhp+PmQaFbFuYXl7DUbINzJ21dUzA9NYF6TQXghJEu2rURZIf4xiw3vR6JLMaeScI9C8aPFyY6+CtTmwy7n1bi/eWH/WbM/5VlHM/zCO4K2cx5Xz+EAH6fw1s3tpMm2bq9XhnEjRPNQGETYKmtY3ZhEfNLTTDmTG4jlEJVZGiKAom6HsTOAy8y/4G4hgpbeiMr8sg3qzy7PKhHzi2cMom/guIh3JIEAESjF5SfUTt/HM4AdvC3nMKxaJPVQViy/Ij2WAza198K6/6dsWnW/v2fgdS02DRR8GYbM+//N9h79senowRgbNkGYDjn0JmNpm2hJsm+8OsxraggcMI2O+v4OoJrMyDQxrHorhcsE4rDasm8c6dlFatcMRcAJEJxRG3CX284irobsnpCVjCtqP67d3d7aeCxWcL99bfircqXWmj/838A7fRhrLW3vRFcliP388d3wfr2j1LnPypFHkgTlJuy3ltJB/P61qYjBEu6jpmD82jrhjPoCo7JmobVq6ZQUxUQ8FT9xKIPegu6EROPiw1zf4PoXX/WW8tUUB6i1hAWFIPEbbFkjh29E8yC2yPTI2yyWWdaQNxkNgCgxIlExzmHbTMsNds4cHABBxeaMCzm9kkAVZJR0xQoMnWWxmHpriGFEJgAMfbnIcbPisfI7VMYVSDInlG9WJfbk3Y5PG8zmbmVcupu3tckWZXC5h4Xr/O7chG2EAhGgS02Mfe5r8emmfj934LcE563l7j3xOwHPglrx6PxFaEUhC1Pe+acY6/ehMU5FEKxXqtHplWphP1GCwBQcwVgkzHMDlh/d6/eBHPfQZ44mwRCCOyedxchBCqVEnsg19wQ04QQrFY0P78Zo415M50X8/B01sKK5Mk90D/R74metD9DDj8E2uteFpvG+Nr3wHftTZRfHhRx8GxYijwQuNIoqg3CvFQ8z5yudIgbjO0WY4N5cnBnzWBKMb/UxPziEkzLBgcgU4KpiTqmJutQZMn3uvHyi4P3pBX3enEYxgZlEC1WCnEjCIOsImxYLqLGWVhIe+xKm/MwS5LsV+TdxXnnF5us2269XsJFwPMG7vP+DfQbep9FznYEfhHewIE8nfPlIASgYL4HMqEUjHO0DRMzB+cx7wrBzJXcZUmCqihQVQop4biq5zVM/HlvJJPrnYftIsdaM6ov4PUXR84ulCy1t7zbhNAJiwgREzQEHVZqI11u8VN8/I3DBhj4K5p22lV/738pzlFQBJYhpKkgc1bqO7IoLHzpW+ALS5H75WOOwNQbfz91/vOf/jLMbTviE1EJWI4QxXAGptrMxqSkQKF04LpmOrPRkJSubdOKiinZ2TZnGpgzdWyePwCd2X6o50PUOlYpal9+SdjVXvLFYw9KCBqSjH16a6i8apKMCbeuU7IKmzvXvWmZmDP1XPtKBGRgF4Ddcz/Mr17dtz1pP07+7eeAnPCU2DT6v391YD55UqX+aJXOpYz0CqxFtEevZ06YOBzc5nn+EaAvvKOfHsQfCKSSDMNiOLi4hIWFJdjM6airkozpyQamJuoAt+EJ0GEibxJW8vdkERi1ryjGBZYHgmjPmKHX/hXjPIUmro2GDQf5wnDgf5FrAOTMSvgS9b1IMxjM6hWEi7Ls
gteP8MTgIBzh/REvLHTw+M6h3P9y6eyjAEhXv6W3X9PUDczMLWB2vgXdsMDhfLfJVEJNVaDJCgjrRCkJE04JHBHY6wsF650F4xh/ya4MDkLyGUguSzhtQVEh7oQQgaACCLG1WmSvxZJUP87DfuO+r4gTm5L31i/JeSZrG4JxIK6zQJAWc8ejaP3gxtg00+/8k8T59X7s6Hfeg9Z1N8UfJFGA2YnLyAqD2XikOQ/A8chtyMqAIxyctXU5ltxwzh4ycbr+MiV45S+vw+/c+l1cvWunH755kLAcx2pF6xOAASccdJy38iAUSrFGrQFwhGHGnRDYnHPMmTpszvBYawGb5w8MyCk5CaJBw/ruT2D/anPqMmp/8brY/ez+nWC/uCd1/llQpT5Clc6lChTRHlGDYaFr7QUHY5FsYN5fH9hmmJlfwFJLhxdQQlNkrJmexmRd6woLPex1Cvbtg/8VfX6BoBiIdlg+QkVhCFtmzTjeVUUbC4vrd4SGd+5LF/yrWwSOyzMoQJumidmFJczMLaKpG77rgkSBek2FJsuQKXEmyAauV1hI6zzIKv/8xc7yiKlC+F2ZJI/tFkNWbv6CfCmajYpWn2Epcv2LXLdEhLyPGE8/XyXb91uyeiQxARngGcpDygp0uRLUItmJD6prZ7bh4JPyPBcEHbwwgABzDSjmXpUV0d9ZHuY++9+x+xsvfQGUTcemypvNzmHu4/8Vn4hSEHu8a/4uWSZqkgQO4NjGqqGPlwjBtKJhn94Ccb1wg0xICrx4XXUqQwnxXhuWQZ7DJmPYp7dwRH0idRmUEF8MBpxH6qJl4rdvuxqcEHzjzBfh1FXrUucfhBC3PxXzTjM++UXUPvK3IIcf0rXde07EvQ/JhkOhvOJFMK+8LjJN+0tXonH26UPWPFuq9F6v0rlUgSTtZDnoDqPYfd94A8a9fwOdCTTcFQfgeQkH+g6EEtiMgYKgqRtoGyYm6xpUVQEFUNdUqLKCxbYOy7J8L2P01GlQ3T3CPHai0gqKSZjNhe3KjbBp+emdaOORZCKl+JLsQNAdPndc9EbZ6KrTmITNYHlePyHYXwiLCOKJwF117nvvd9J2tvXfdV7+hFLYAEzLhrmwhFZbRqOmQVMUUEpAKYFKZcgSYNs2LM+TGgBzy6DI/polWSt51LyzGtfxqiee4+IaFBkxCl0xxunlF5Vv2Txvk9SVcQ6G7l9xzqCf0j90OQE47f6Nkt2QXuHjuke5H+Qm+PP2DV//vGsbXm63h3SwvoIO/fM1BQJBEvTbfw1r20OR++na1Zj6w5enzv/gP/8H+GIzNg1h4xd/m7YFiVBo7vq9aViwDDQkOTKPb575W/jJ+S/H89Yf5YdYHoWWbWEuZo1hhVJsqDXQtq2Ry/JYrWiYVjQcVp8EYzZqUvrr1Uui4SjdQPujnwNMM3T3oHeh8qoXA+vXRifYNwvr2gHe6WNAvNMFK5GoMNDBcM+RaQhBcE0+f4CXcchU6ojIhGBuqYUDBxdg2c42SSKYnqhh1UQDiuyUlee3vGjf5aMMYyyC4RD2qwbMGzt0BbLsI9tVBO6EeC7iPb9cz9be0NBJl6TojULidC3608eVI8EVhKkE02KYnV/Cnpk5LLQMPyYJJYAiS9AUCYok+1Gl/EIzvhZ5kZ2wHDz/zC9BJ+ucwj/nQel1iIpDeIZPsiI+vMtIUa6jqEegDhHbh3m8Lcd5OKEYy/gQThZ+rewEZ/R34MG9zpYx3Tvx1zhpK0hfVzp2ExOM6zOsO0yUmHtVZorwTlpJ7Hvze2Dv3h+5f+3F74T69FOGzpdzjub/fh8LX/72oJTI+z1668wufPKR+/CK9cfiVUc9FTZnkMjozwmTMcxZOlbJqrOmU0yeJmNo2iamFS11eTqzMWu0cXgt3sPX5gxN23LDVI+OyRhkdwBDZ/ZIonkYSd4U0vPOhfqWP4jOI+b9yjZvR/viy6Izn5pA41PvB+q16DRjpCr9saqcR5Uosk2i3v1d23teF10eRj3bgt4njDgBGQgHFJliolaHrFBfMLYZw2JzCYblegURAjDmCtH5vqOKbBNBh0HRDYQdi8sg2/UibFlswuzZu22l2rDP0xbljMwyLg/hKC9zd2NXv4IQEroMT2eT01dgAEiMhzEHd5xxvLVwuDNGV9M0TNRVUEpBCXH1ew6b2f7kNQKAgYCiP3rKsOecZXsJyzObfAG48jihzoayiLVCAF4ZcM5hWjbu2fkk9s4uiFHo5WC5vQ6T1m/U47P6FaEeiPilOY9x2bhTDinhL/x8ivDLEk8U7P55t1d3meOoW3z+4c2gn/R2T1p+dozveRt9ZwsEgiia11wfK/5qZ52WSvwFAGvnY4PFX1nCOFrudXsfxa17H8eVux+CxRkoobAzeNgplOIQtQ5KCPbqra59jHPMGO2utKOWqVFpoPgLOGsC16iMA4Hy0+KtcUwIwaJlQrctzJvGyPkG4f7/RWNffxvsO+6OziPm2tJTN4GeExPmeWEJxnd+FF+BMVKEb5UsqMp5VIki2yRqUKvLy4aE7wvzvAn+Td04MYQSGKaFucUlLDbbsCwbhACyRLF6agqTjZqzDh9nkCTJ/UbNfwC6KGMUgmgGDbqO69tWMDxJB8yF7cpB1NqrQVacDXvONTguUkbByLMdYwzMC4ecgz27vHx77yH0C9HUjT5CuvLw/E3cNCHHdXsTE+cYtxBHWAaabR37Z+cxv9iEYVng4G7/REJNlSFTgHMGis71yDrMchZEXc90eLG2XRE4By/oMrWPMtV1pSIE4DFQto7aKJ3QcZzfcgh1SULAYAAAIABJREFUWZLVgzHZ+YsmnjXLIRgnzaeo7TE5YSGxvV9QKO79FbvNC6qF6NyOB26YWPjKd2LTrHrLa9Ll3dYxe+ln4hPJMmDZqfIflpcc9hS85sin4s+PPdWd6MKxq72Eg244ZZ3ZobO6k9B2jz1Uq3fl0bItWD1hn9e6a+suWuHhjBOVZ1uJnscKpVinjubROmfqIG5egCNAN20LmiRhX4/gPTJk8AQk47P/A37gYOT+uOui/eErYvO2rrkBmF+Mr4BAUAGK3J+LGzgc1DeIG8gLbpVkGaAELd3EzPwiFpo6mDsIO6FpWLd6FVY1GmDes5YQ2OBjWZyoDN/ZguER9iwXQXuJ9lhOBk3yrwyB2fvxrh7lJu/x6N6Qzd624L5B4aF7w0J3i8vRy1pwzh2vX0oBIqHZNrF/dgGz80vQbdYJIS1JqGkqFLlTT+86BJfNGPaciz7u0hHYy0PRr6kgP6QPfvCDH1zuSlSBsr6oo+qd5Hy8NGkfIEW9ZqPUi4OM7ZfWk1YgSEvuYW5Cfsl6VK4wPKjN+NMZl691kGUtXSAoD83v/Bj67dEelfULL0D9+c9Olffc5V+Gee8D0QkoAbFHXxM3CY8053HyqnX4jUOOwvGT0zA5g8UZ1qk1KFQCJQRLloUDRhurFBUmYzCY7Yueg7A4x6ypY0pWYXMOyX2mKoRiMiIEs8UYDMagpgilPGO2QUASH6szG/v0Vqpw0DVJhhIoRyIEk7IKmVBMyMr
Q+Q2CetPho57hlg328GOQn/vM2HxCxaNGHbAZ2P07wg9iDBwE0uknDlXnvKnKIEJVzqNqFNkuSUXgrgFXb1tYfk5iAF7YROdYy7ZhGAY4AyRZAgEgSRLqNQ2gHLbF+iIULNd1K7K9BMMhbFk8kggiXV6WwoaFJdSjM+AxGZa+VBR0nHfc9ArBWdgxLI+we6cj/Hb3QTxn4N5s4vov3jkwTgDqeBkzxtBq6zBNC6AEEpUAzwOZEhDqlNG9DFqnnLA6B/WFvMIeZ5ovYf7wIsl4fK9sIZpL94xaQTDGsWd2AUttA3KWGffGj68CZTufvOsblv84r1F0WaMInkkHecvpTbuc97B4EVSLtPfSKPdB0jKTlME5AqHyhhN3suqwl+yVIhAsC7ytY/Hr10TuJ5qKVW94eaq8zQceRvvHP49NQ2yed1RNn2Mbq/x/t2wLGpUwa+moS7Iv1k4rKqYVRyAlBFgwTTBwTEgK5kwDdUmKFFxrVMIhag0EwJyb74SkwOQMCmjos60hK2jbVqq1iKcVbah3hUYlHK41YDA7sWi8ZJnQme17LAdp2xYIIdCohHnTACUEkxmJwR0fhmjYlh2wb/4FpPPPjs4nYpBPefmLYP7kVmB+IfQ48wc3QP3d5wHTU0PUOh+8c4gbsCwTVTkPwXgJC2/Yu40x1ueZAzhPEi8qg+fJi8Dgp/fEseH8c6ndRssyMaFpqKnO+2GqVkNd4Vhqt6AbXsSK8d/Hg575om2Vi157CvuVi+D7Ofi3h7BnMUkq7CdJvxx0vQeXsR5FJNgWAYzcHnuF2bC8/H3uvNWwfd4mL2lvGu/Z4YeYdsM+M+bskyQJbZOhbS45oaA1FXVNhSIBlFBwAkjUCZVtM6c/FKz/uMizLOKp3CVDfPesTMqpaGVMmcII5x1udjnCkXDurCuQ5Bft3RfuPZjsl9TrdnBegm6GDZecxU9QPMZhp2Hz504X1v1v8Bfe/vueRalPgcMRn72fQCDopXn1T8Gb0SF8J37vhSApRbC5y74Qn4DSsYi/e/Wm/6wymI09ehMmYzho6n5o5KWQUMwyoThUq2NCckRNSoAl2wIALFgGDpp6X3+EgIBxjnVqHcRdb31Xeyn2w68myaAg2Ks3hzovjUqoScPNLyXEWQNzwRq8dq/NGSghoeKvl9cB3VlbeJUrmptsvM9a44tXAQtLsWlC31eqAvXlL4w8hlgWjO/8OKtqCnoQ/cjiURZ7hA1mRoVpDDvOW7MvuNc5d+fZCDi9R8u2Mb/UxOzCIgzTAgeBTAmmGxM4ZHoSqiyBwwbngM2528tc/v6m+E4rN+JbuzwkEVbiIgAKGxeboG3GYa/Ebw9xzyQi63H2qKgAXZ60NMSzF939jf4wxs74Vydf18fVdWbwwlE7E9ycv22bYanZdtYJbpqw3MMIAWSZQlUIVEUCIcS9r9xxNu7W1xebJaf8Ee+pfPULN7Q1ii/+juN5LoTk8lBpAbgMglEe4lYW4tkw18V9Xvf8kpTnpgWQLJzxoF9a8s5/+Vnu+3ycVFV0LnLd0pD39U76DOon2fOAI+aZFvj1Z08Cj5RKv4Irjejo5gdvtbH4rR9E7if1Ghq/96JUebeuvQnWw09EJ1BkYAxi4Zyp4xC1HphR7XiprlJUNALiKQNHm8WvQzwlq1ijaACAuiT7Yq/JGPboTbSZDYPZ2G+0Qdw0Nuc4RK0PrCchxA0dnfya2Jzh+7sfxpy7fnFSZEod72ejHZnGO696jMCsUQnrtJr/rpiUlXx6cHGvosUmjP/+dqps5ec/C5iaiNxv/fgWoBV9jcZFmJeAQJAHZbq3ogTeSHEYnS9MEkjf2YIur2DqDsCapiMEzy0swrCZ792zarKBNZOTUGUKiXNQzgFQJ2xjoNys2+ww3jxl+U5ZySQREMv23bmSCbMnY8z/ebYry5qbK5ko2+TVFqNGQYlTaNeARzVGTfMlKnwz5zy0TQ6Tr7fublg7jvq7v2/i1wqUBI/z6kL6y3AH9IPblppN7D8wh7mFJgzTBufOOsOUUigyhSIRyIQDzPbzZv7YXEdUzgLvOmb1bAvahYRM3suCcYRpzqoM8b4oF5mvAbwcN0BRO5t51MvLM+u8o/Lzy4v5OSLGoFc+CUknughAce/f5Ua8TPJ5SY9aj7KQe6iXYYRi9EcJ4OjeUMZrvNIRNsuH1g9vhH7bryL3T/7+hdDOOm3ofNliE7OXXA7oMV6m7kB6nhjMRkOSQQP3DyXE95qVAh+oGpVgu30EmuB+c/Jx1g2WCIHsrhNcl2Q8qS+Bg2Ov3sRfb7sdjxlLeOb0elBCYDIGk9mQKQXjHdFZIgQ2d9YDbtoWFEqxaJng4JDdfzdtCzVJhsUYZk0dX3p0Ky7e8UvcszSLl6w/FpQkDxMsEYq6JIemZ5zj8dYi1mmDhWsK4osp3nV5pDmP1a5QngnE8c6Lgj/yJOSzTgNZvSoyTV+WhACSBKLIsO/eGp7IZsCqCUhPPW7ICo+PKjwbq3AOVaQMdhk0yBjmERwnEvcc7Xw3EicylcUYTMuExThUWQalBBKlqKkqJEmCZdlgnrDTlTdFkS9lGewsGIywY3EJE4DiRCdhy/KTKtQwenq6Ytwyd4LtMKpNBsM/JxF2e/f7fzsbuyejkc6v+zhve3x5nHMwSKCUwDAtGKYJw2IghEKSKAh14ppIFI5ojY5TBqXdk9WyIo+JLYTkE0I5j3PPI9/e/AXFJbgGcKncj4o80zDLemU1qzLqmGHz7Yi8Ub9eSEia6jwYRvFcjLr2ed7HRWkjw5L1dS7ic2MQUffKuM+ljNcxz7oOdy3CnoUEvTFmy3RtBYI8WfrOjyL3kZqW2vt38b+/DT6/GJ1AlnPvqjzZcsIC934sGczGvOkI021mY0+7E3ZZoxIICPYb0SGxo6hRCTV3Xd0TJlZjUlaxdX4WP3vyIfznjnux6IaYXrAMzJo6OOewOceCZUB3RWCd2WjZFiYlBTN6GxZngXng3QMGCqFYr9TR1HVs0BqwXM/hJ9pL/jm1bQtzpu6vfRmG3nMNAEfEPaaRLOy3wWzs7gldfWxjVcahoMnAgbDcvICvuSFVvuOiCu+vKpyDYHlJOijWO5gaDAdNevZz3r2eMCUUjAFt3cCBuTksNFv+gTVNxbrVU5hs1JwBVmaB+8/AYt/fov1VA/E9IxAUh1HGGPxjc6iXoJuokM5Jxo6D2wcJwf6/A2m9/kcnnZ/zUOdAKcBs2xGqQaAbJmbm5rH3wEG0TcvJmDoewbIsQ1UkyDJ1J9dm++7IWvx1rhOvkLqRHiH+lo/hFulKSHBGyrAUrZM4qpCbNcE8k+Y/qi14r6taCqJmMTu/9ANyRbtf0pD3OQTDfQk6JLkeXppUMybH+EIc1baj1DVJ2UXpHIxynZKcQ9L8u0PppMuvKNdUIMgS/c57YO/ZH7l/4mUvAJ1sDJ2v/fhuNK+5PjoBpYBpdk93zhjOOY6oh4t6JmO+4FqXZBjM8dryvH4lQmLDHjsf0vF11xnDnG3ixOl1+LsTz8YRtQlM1xxv2rX1zjVVAByqTPp/T0
sdj9t18gQIiH+ZJqSOR60Eigkq4beOOh4vPmojVCpBcut01OQqpy9JAInI4BaHTZ1wZY80FzApKVin1dA0TdjMxqSkYL1Wx369hZ/tfwJPnVyNU1ethUSSzVutSTIaPdcQTvFYskxMyEqifAbjPcvDrz3b/CDYPfeDnn5Sotz8PoeqQv3d34Tx1avD080cBPvFPaBnn56m0oKEcJ7P7H5Bekbply8HYd9gYfdVr/dM73FBD47e44kbbYYRiqZuQjdt1DUFDU1zwv5rCjRFhm5YaLZb4JwBCZ+ly4Xo/1aLMHsKOxaPMNEp6TiSsGfxibNnpP160+VSM8EwhInAUV6eQfGzV1TtuxfczxkvYogn+DvJgn2U/j5LX9mcg0rUv30IIeBEBgfHzOw8FEVGTVXQqKmQ3NDVikzBuBPxybZGH7fO+5lEBkSCKhLi+SzwyEUADqNo4tOw4mlYhyhNmVGNL01+jDFs27ZtqPIBILhWOfGf990BPiil0DQVqqpCVVTUGw00GsMPuvaV3b21b0vayQOMMTwwxLXw2HTCCZDlsTWDLprNJh595JGhjzvyqKMwNZXMCyaM0IEHOLPKO3+F3/OMMTzwwLbIsCMhheGoo47G5GRnMHmYAXYe+O84Xlvz8/N48smY9SF7kGUZmzadELov2Mka9aXb7wHQn+/BuTk8/vhj2LN7N3bt2oUDMwegt3W0Wi202y1IkgRNq6Fer6Feb+Cwww7D4Rs2YMPhG3DU0UdDVdV0dRtQL87DZ8iFtfKB7TjkOiqKgk2bNiWtbmq8c1tcXMRjjz0WWrXeR5dXXVVVsXFjp45RszcJIVhYWMDjj/fnn5agLZwZliq0Wg2qqmJycjK13QUO3rNQCAXZsPSdn8Tub7zshanyPfipL8QnYCxX8XfO1FGXZKhECt3fkOQugXc6JFTxhOSIlrOmjjVr1oCevAk4eRPIyRtBjjp8YP1lAJ78fGKqsxiMDGBQkGUZQDCI88bAv1fBCdXNH3gIeOAhfOrKr+B/tt2GF689Chef+mzUJRkKTSZchF1DmVLozMacqYfuHxriiS/RGF/5LmoJBWAPzjmk5z8biBCAAcD4wQ2oFVgALptQF0XYYJlg+SnbO3dQSOioQdne0M3cGXnt/Bvwn0PeGC1jHEstHS3dxGRNg6YqkAhBo6aipsrOPsMKlOf9g4EzZ2IOY2xZr29c2YmFC0GhESJ/8RnGDnHv/Kr0B8pOoudqhK2E5YpHVFuLmjgGOGP8UUKwE9ioa30y3+7d6TtjXhxhfRgKb6SPBPKk7jZJkvx+SrNtoF7TMFFTIFHixMojgKxKsG3m/NwJazRwXowAhIX3z4Pnn+Uzx3OKo5SD9GgnWVC2EM3ieV5Ocle+wjp3ST7ahhEBs/TMiss7CxE7SyG83W7j4osvziy/QdTrdaxfvx7r1x+KQw87DBuP34iNGzfikPXrMysj7fVpt9u45JLhr8Vln/4MVq9enarMUbn+pz/B17721aGPe/HvvgR/8AevyaFGPYJv7x5C0NZ1XHrpJUPles45z8Rf/dXbOi/jkWuWH5/9zOXYvPm+xOlXr16NT112eeT+rF6MHP2ioWEYuPfee7Bl82Zs3boFTzyRXLjuRZZlnHDCU3HSySfjjNPPwPEbNw4+yKtbxDMe6HS8krbqtO34Pe/5B5z6tKclTj/KLOYHH3wA//qxjyYuCwAOO/xwfPzj/xZZB++dyDnHtm334xMf/9eh8h+F6elpHHLIeqw/dD2OPOJIbNy0CRs3bhppkslKRHSCR8d+fDeMX2+J3F+/8ALQmLC4Ueh33A1z647I/ZxSkExDA3ezYBmO+EvDxV+gf0KMzRkOGG0cqrkT7yYbIE89DjhlE9adtAn02CNyFayXEzrZAM48FTjzVFz6B7+Li1otzG59ANrOJ2Bv2wll5+PYO38QJmM4sj4JkzG0mYU6lf01jwEnjPSiZWKdWuvKPzvvX4dB7zf+6JNgP78L9NlnDpfxRB3Sc8+BfcMdobvZlu3gT+4BOeKw4fIdM2UT6gSCcRPVJ42aFO3HHYicNO1+LzCGucUlKIqCRr2GmiKBUorJiTpqNY6lZguGaQIgIJTCZm5croD4W4b2K7xLq4eYeFNewiZoCBuWhJhIk8KG5cOzYe+ELn98LsKTOKrfAQScyFwRmIQeF37vRN1TzWYLS0tLqGkaJifqkCUKZjNQN0Q0ZQyMczhLYEidSHqkr6icKZZDo0CQhkwE4CgBIOqjIW5mWFblZ5XPMDPWMquHU+LAvNmYn0GtVguPPvooHn300a7t09PTOPmUU3DWWWfj9NPPQL1ej8ih+gxzD9x0042pyrj157fgla98FWhCLxiPuPDhocMHIQPiLMUg+R133I5f/fpXOP30M7zCfUE5prKBDsZwHc60HdSf//yWocTf5eLBBx/ALTffjNtvvw3NZnPwAQmwLAtbt27B1q1bcNWV38KGDRtw3nnn47zzz8e6dYf46Xqv7MCB75gQNVnyX/91Bf7pI/8MRclmYD/zwSQe3uktwozoubk5zM3NYceO7V3bDz/8cJxxxtNx1tln46STToYkRYtXAkEWLP3whtj9ky+/MFW+izFelABAbDs3MdXmDJOSMrCNL1gGOIDVrleqPNFA44yTwE47CcrJJ4BUWPAdRL1eR/3MM4Az3T6EzbDhkcdh378DdNtDoJsfgD43BwoCmVLsbi+BgOCwWgO7rSUohGJClvvCRx8w2piQZNTiQmsnZNAYhPH1a1AbVgAGIF/4nEgBGADM62+H+rqXDp3vuCnCuy4LqnIeVaGs9oiLdhXlDQz0iLF+aMb+gdXgNipJsGyGhcVF6IqMuqZBUWQoEsHqqQYMy0ZLN6AbJkCoH/axr7ySUdZ7Q9CPEIPLSZIxMWHPAtBrJ38ArjeZmGhTVnr7Eb3P1Mh+R483MAC/j8A7mwLvWy9FhzhxuRPFhIBSGS3DQtuchypLqNdqqKkUlAASpZAAMELAOWAz1ic8B/sruXm8AgDh4wtNWWBE2y8vcpIO8jDhkoMNL2pmyXKQhwdw8NoN0zntr0qvGBd2fNJt42dubg633Xorbrv1VsiyjNNPPwPPf8ELcNppp2f+cMj6g673JTguHn744dQemzMzM9iyZTNOPfVp6Jth5cQBi7wznMgVvPPvAWT18f/lL30Rl1z6ET/cbPC9GTcIkoakInUw/8XFRXzlK/89dFmeZ25fXUM6TJH1QDJb3HPP3fj2VVdh+/YHh63m0OzatQvf/Ob/4qqrrsR5552Pl7z0ZTj00EO7PJEHTe5J86GQZoIBAOzZsxtXX/1dvOIVv5/q+CRk/S4L64Av1/MojN27d2P37h/i2mt/iMnJSZxzzjPxogsvxDHHHLvcVSscUTNbBcPRuv62yH3aWadBOnJ4T0dz84Mwtz8SnUCSANseOt8kHDDaUAjFKmVwmHV1YgLKiRtBTz0B5JQTQJ5yJFYPOclrxSBRkOOPgXz8McDvPA8a5zjssV3g2x4Ctu3E4Vu2wzh4EACwoTaBOdOAbBM0ZIpd7SXIhGK9V
sekrOCjD/0at8zsxvs3nolnrd2QukrOuzH6dc/3HoD901sh/eazhsqXHncU6AnHgT34UOh+64ZyCMAeZRaUglTlPKpCGe0RtbRU8FwGThwMmSzb21d1/kvACYVuWNANC5qqYKJRg0QpVFmCItdhaioWmy1YNuurQ9mubZCsvzEFy4vwSiwPaZxnhD3HQMg4YNdVH8IEwn7lJ2ko/j7ROLi9q//RPYMgmEdUdBHOubuPghIK02IwFxaxSCnqjRoamgpKiO/8JMsEFrPBGANn0Wps1vcjIfn1h8qWr6C8ZBoCOurjY9ykLXcUD+CoD7l+aE8aXimnDsuycNddv8Rdd/0SGzYcgRe+6EX4jd94XiaeebEerCUbeL/llptGOv7nt9yM0097GoDkYXXCBhQ48f4vZgIIGLyXuSynG5Det28frr7623jlq14FzrrLGrR+RVz9sxJevvGNr2Fhfj718X3lB+s46Fj0dMJ7rsdDO3fii1/6AnbuiA5hmhe2bePGG2/AzTffhOc857l45atejYmJ6BCscXbz9ufVEbn6u9/Bs859FjZs2OAVnEs5aYmbJFT059fi4iJ++tOf4Kc//QlOPfVU/PZvvxjPONPxZhMdS0EW6Lf9Cnx+MXL/xEufnyrfhW98Pz6BZecyl45xjjWKBhrTPsj0FHDhBaBPOxFTxx+dfSVWCoSAHHMEyDFHAC88DxSAtG0n2Ld/BHr3VkxIMhpu2OfDtAZM7kw04pzjzrn92DKzF1vWz4wkALvViMX85g+GFoABQH7hs2FECMCYX4R9572Qzjpt6HwFo1F2YawKVMnLs3fSfDBsamQfkRAQd/C1e1K681/GbDcfAOBghACUomVa0OcW0FBlNOp1UEqhKjLWrJpE2+JoNpuwbbsS11VQbYRXYnFJ+o6s0oSTwhMyPuWNQ6W56r32EvarLr5dGXeGjgPbORjASc+9EJMHuvs83rrE4Azcy4dIsMCxsNTCwsISJht1TDY0UOKklQiBJMnglMOyeV+Ya4EQlQXhhArAwwxGD5o1EiWqpr1xxh0qulPN3mOJs5O4L07Wez4Jzo84wltvzt2zZ7yk/dey6KLBrl1P4ktf/AK+f801eNWrX41zzw0MfEW4SvC+kBME4Myf6exsCrw84HoM8nSeg5RwAKwnZAUHcYX6zn3qebR7Yqh3D/cIOe7/+ZbhgZeku5HZNm679dZU9fX4xS9+gde89vWoaZo/88oLydFbn2HvEx7TNlstPUVtHb5/zfdxzjnn4qgjj+pvTYFLSXpEbW+OGaHUuQfc2V+O56w34EH9tJyE3As8mJ9nQcdQD2zbhht+9rN0J8WdgX4E6+LV1/0/Z70Kp36DbBFs561WC1de+U38+Ec/Su0ZmxWMMfzsZ9fjrrt+ide9/g3dbbmHwREQ8nluWZaFz3/+Crznve/zCoosO66OZMD+LCjzjNnNmzdj8+bNOPHEk/C6178eGzduikxbpvMSLC/NH90SuY+uWQ31GacOnaf1yJMw7ooJ608pkMOztWVb4Jz7omMUfG4B5Ns/AtuyHcYJx0I5aROkTceCaIM9hgXd8IPz4Fu3A1t3gG/ZAb57LwBg2+Is3nTfz/C0ybX43OnPAyUEGnHC2dckGZed/GxctWoHXnz4U7KqCaL6/3x2HvbNv4B0/tlD5ShdcDbw+W8BrXbofvP620olAFdpgLBK51JG4rxTykhUvaPEkcgIQm6S4FJBHATU+VD0P7paJkfLXEJdU1HXVEgUqMsEtakGdMNEUzdg2hwUBIwzUOp+o5DOt05ZSfotUtZ7aSVT5m+sKjGsg4LHIK/9Kjzrx07ceP0I2UY5cSRNKyg2obYk/boEBXXHPPsjmQTzibtfHCGZdt2Pkqu3MErRbBto6QY0RUZNVaC538qEECgyAWMA48QfM+2UlXZ6Q089YYOSbCNz9U72EwjywOmvu1oaA+SkHnRZDtonvcmz9OTNLo+OIOiJfaPnme+xy83+/fvwmcs/jeuuvRZ/+pY/w4YjjnS084AXhkfwzgh6SHsCrTM7iPuPcg6M9ODkoKDuYKAj5DNwOLOmHftyX0RlDM7MJPCu7eAcnDsCoPO3k5t3fzDinhd3gntvvu8+zM3Npaqvh67ruOXWW3H2M88F4ImNgRngXkJCuv8ekbZhpj7Wtm184Qufx9ve/i7nJR/U+n1tv/M86r3nabCxuTbnxOs0AN5ZUj8J8UOUEEr9vP3thMC2bVxxxRWpz4kQQFMkcN6zpoV3DmBdaYk7a41z5t8fYR8+jz76CC771Cexb9++1HXLg/n5eXz2M5fjl3feiT9905tRq9VS5RM2aJpF+OOtW7fipptuxAUXPCe23LjZxmGTJkS3rJ9t2+7H+y96Hy644Dn4wz96IxqNRl+apGGFBCsbPrcA/Y67I/fXXzC81yQALP3vAO/fnMRfxjkmBoi/HtwwgfsewOwvf40JScFkrQY85WiQk44HOfF44KlPAZnob1srnSjBN0jTMnH33D48uTCPBT188tpapYa/OP50yBmF2yacdM2M78X83vVDC8AAID/nHFjX3hi6j/3yPvDFJshkee6TKnluChFYME56B15D7z3evz5w2B3K3e/wlm5Ab7fRaNRRU50162uailpNQ7PZRsswnGebU6IzUZgFv29IZduB8DAtP8KG1aD3ueeNGwhb9tM3HlgARDssH70ibhJHFu+/vhiMYF8kvowohzdfyuVAWzfR1g3ILR1TjQZkhYK647oyJYAkgTFnnWDbtt2JcKM/J/K4V8X9LxgX3BXPOHhnDeDoxNl4A/emixOehxkYCH1IJG5Mbh1C8yR96YalzKJtHmzf/iA+8L734rWvfR2e//zng7urHXdmKHMQKoUe68w45q4nJwdnzsctY46YpptWqjrNLcyBdc3m6Xge9wtFvWniIF3/4YA/4/qOX9yRqq693HnH7XhmwBPTmTnV+bdHlgNto+axY/t23H7HbTj3Wc/uCPjoOHX7VzZ0dnm/t7XXJQg2Neb/wQMdie41Hr3zuO6HP8CTT6Zbixlw7r+ZuUU3VAlxwn0Sx3+cEoBSx+YYo1ckAAAgAElEQVQSBUApCAGoREGJHHktb77pBvznf/4nDMNIXa+8ueOO2/HEE4/jr972jk7I5YSM9swezNe/9lU8/enPwOTkZNd2QkiXN0RcnfrE6a606eoVnIcYW37JXhs33XQjtm7dgj9/61/g5JNPSXycmMEt8Gjf/MvY/Y0Lwyd0xMH2HkDrhtujE+Tg/cs5R41KoJMNkHOeDn7/DvBd/cJkGIeodeedaNnA9ofBtz8MfO+nACEwj1gP9eRNICceD3LSRpA105nWuwx4gu/8PVtRf+ARSHv2R6ZlnMNgNhZtE68+6qlQqIQTJlejbVuoSZ3AR6RRQ+u44zFpcsxv2YYJSYkN2Z2oniR+rjl/5EnwLdtBTomOnBCGfOH5kQIwALA774H0G+cOlWcRqMrgbVXOoypUaYJBkLDB1d59vkcJvImoMZ7SEgV31/xlhGKhqaPdaqOmaajVNRDO0ahrqNU0tNsG
mnobjAOU+zOzK3ut41iJ51w1hJdwuemadC/aY2kRYwHlYhhtxk/LeWB8uns8PejAEzbBw/s3Y6wTJpoQMEiwbI7ZhSUADDVNxdREHZJ/PIcsURCZwrJ5l3id9j4r290pwj8LuulMyJDDGkPW3sBxIm8YwVldwxIWlndQeSHzkWLLEMJuegzDwBe+8Hncd9+9ePOb3wJVVcG4Ey6RMwbbjant2d+71owz31A8GGzX3dZMHZpY6qiPbp6d/++dldSdJoxBL5dms4l7770nZV27efCBbZg/OIfVq1e7hYeIpoFtcXdtWAcsr07Zd676Fp522umYnJx0BihI94DFwMoGCXt+9SSJqvu+fXtx3Q8HeIglwGKsSxUMDv56V77LLl3/7HgjE0JwzTVX45qrvztyncbBE088gQ9/6AN417v/JjYMMNDtHRAXsi+L+2x+fh7f+MbX8Cd/8qau7Um8UfN8tgdzjhODy8j+/ftxyT9ejFe+6tV46Utf5m/PKtpHWTqbYYOygsE0b74zcp980vGQDjtk6DwXr7w2dj9hLLP2d8XDm/HhbXfg/SeejT897jTQP3styJlOyGp+cAF82w7g/h3g23aCP7Y7dBYJ731P+Ds4mg8/hsWHHsWaH//cqfuha4ETjwc5cSPISceBHH5oRmdSHPj8IvjWHcD92x0P3yd2AwBk28ISY1il9IfJbtkW9upNHF2fgkal/5+9946Xo7rvhr/nTNlyi64QaoBADaGCKJLoxGAbjMHGNsaYEjuxncR24tiOUx8nTuI3sd/nTXHeN3ni2EmeOM4TOthAggEbA6YKVRACSagXQEi6ZXuZcs77x5SdnZ2Z3Zk7e+/uar98Bt2dmVNm5syZ3/l9fwVzEoZH7C2nnw0AyKdlpFevAM5ZAixfDLpgPuYRAl4sg/7pd5B56x2cIkeLauEEtWwVfaD85GkkQhLAZP4c0IVngB18y/O4+tIrXUkAA71DnvbKdfQSevmZBMnSdWtRhxeO+zjXWcM6V+NAoaKgoulIJxOQJQEEHOmUjIQsoqKoKJcrRj5hB3r5Xvuh79HWO+iTiN0Pv/ex15+tl7cvAUwdoOWEMuXdCo1+6OjORquh2i0w03PXMkKrvYd2jXXLYXf9zt+CINS9x9TMFWz4dVBUqgoqVQVJWUIqlYQkCnZESFEg4JyAcTNiZwRZxWCX+oRqN/W1j3rYz46YOYCbKS3jCl0c1Zu4fqy5Jx/zX5sd5ADx8iIN7008XfAMj+pzXrdi8+bNODE6hl//wm9hcGjItgAiXiEaOGAkhIc5cQNUIKYXsEncTGI+8ioalfxsdu62V7ZCjcmzk3OOTZs34Jprr/M81kp/LHid165Jvlgo4JGHfoQ7PvUrVkuwhYCgMe3Rncn0+4F774GqRg9pbYBDAK9ThBhW9+acRIg5qmF7rztfZlstwzn+6+Ef46knfzbJ/kwtSqUS/uav/wpf+9rvY9k5y3zPCzICsrxy4wgBbeHZX/wCV175S1i27Jymxg3W7zBGUOFRuzbOue1lVu9Z3L3zOWD0//777sWJE8fx2c/+Ws1S0wdh5pf+orB3wfJFqK+/6Xs8HYHU4qUKyk++4H+CKBietjFhfdbw8t04fgy/8bu/Z5O/AEBGhkAuuQC45AKjb8US8OYBwzt4137wA0cAznG8WsYpctLOT+vEDCmBklb7VvHj4+DHxzH21POG5/DwILBiKcjpc+1zGIyFruDzjnAARV0FAYFEKOQWQyDnNRUpQYTYwrtX0FRQQpB2eN26UdRVDAi1cNmVTBaJ3YfA3zrqeX5SEJF03KIq05FVq5iTSEMgBGelh+1jZMYwyLlnA8sXg6xYgpk+RDkZSGH4934D1W98B4gYTcYJbsqqfreIbd0B5PLA8FCoeoVLzvMlgNlru7ouDLQTvaKg7ZXr6CWcjM/EMmXm8JZtnZ7EAOpy5nEYYexVnWEiV4AsihgaSEAURQiigAGBYiCZQK5cQVVRwBkHLDkerWhbehutkMInI1neLeivN7ob7jnOTw/d9c/TZdjTy+i/k52JICM0935/PWBNv++u26usUTcHJcbfupVO0ozmWVY1FMtZpBISBtImEUwEEMIhEGKsjZnFJwS4ZpD6P7tNsOm/G304wR1fCgKTAPY9OQaFtLfCPfwSwRkuoG6/ldTYFXK3eX3T/8n0WyR4vrTWudZx3v0v96GDB/APf/c3+PLXfq/mxerJ8tUUaYTULHAIrZ072bB9DU226d5u3PByvPW9vN6TAO70sbFh/Uu45NLLsGTp2ah7p6eo25s3bcSbu3bGUBMB8+i0c5zWdvrPOf/9yENdR/5aqFQq+M53/hpf/+M/wcKFiwLPdQpzzm9DO+bjH/77D/AXf/ltiGLjZy7s++HlTTEZWDV4LVR5ly/nnnn6aWQmMvid3/law70PIryjzFkno2K311B9cWvg8eTla8LXueEVI7euH2IkfwHgm8vW4RPzFuHC914NetMHAs8lA2lgzaqah3C5DPbmAczasQfS7oMgB9/yJKfTrpzCGbWKGWLCqCNXADa82jBzKExHlekYFhu9ZQGgpJSRoiJ0SsF80m84oTGGFDFSHTDX/uPVMk5LDdT3GUBWrYIKImRX/WVdQ0oQkQLsurJqFRyALCUC+1FUFRR1FXOSA6jomn19iflzQM5ZYnhHL18MEsJznJw+F+KvfRLaP90ZSz5gQoLXOfrzWyB86OpQdQrrzoN6n3/UEn3TNojvjZYvu4940Sd4+mgXgqI0cQcpYK1D6rzDfAz+3V7AlomiIFBoTEcmX4IoUqSTSSQkEYQAQ6kE0kkZpUoVlapqGrwaZLLlrdN/Dwz070P3o7/e6A6EjTzV7c8zsPektwni/rzaOfCUS9DoZOgZ/8qmOGjdM/U3nDIN15znOlqloghFY6hkCpAkCQlJQDqVhEAJKIihj6XEzBNsa+TqrqVOF0eIuaaLH+0Yv/3Qz314w9BLcA5Qd6hdILpC3lmX11bfgeA6DBgvObMtNQhgZNd0bK1/6L37MnVwt02IyWxSChBj46a3oNe5dohcw1USvfBZP3HiOL73j3+PYrE43V1pO8ZGR7Fv755Y63z36FEcOXI41jqnCvffezc0bfLeNmFRLBbx0IMPTHm7fnj5pRfx858FhyvtdCiKgu/+4/9CpVKGKAoQRdHeJEmy/xYEwd4sRI0S0Qxvv/02nnh88iG+nZhsNAy/8p1gkBQnXnllK/75n78PnTEwzu2tWaQRzpixBcoP3mUZY2A+Zafyu98XkMOh/MIm32PS8iWgEfLdln4RkPtXlvyPRURKkHDtqvNx2ld/w9/t0wcklYJwwUqk7/gopG9+FeSfvw369d+E/rFrUF52FiAZRhTvlAv41q6NWD9ueMbOlBKQmhCVCSpACJCRT5VTGBAlJFogfyu6Bo0zT2M7hekYkhrva0XXMCBIICCo6FrdftUj//KQKGMkgPy1LL0nNAVZTQVOm4uZH3wf0l/5DIR/+DMI3/kT0M/fBnrVxaHIXwviletQviYuAjU4spL6i/DGgOSMecDsmb7
H9S1vhK6z09BL38JeupZeQK88jyBi1ZlSxrnP/hveMkqQ3EIIASOApnPkCiVkCyVUNAYQApFSDKVTOGV4EClRADXbZlaKhb48ZGO6ZdM+4oHzuXmtO/roLnTTu9gt/ZxK9OfVzoKXI5uXXGLtd4LzxnWhszx1rHktGciLJLaOUUqh6zpy5QpOTGSRK5SgqDpgBoemlEASKUSBGB7FJrfCeS0lEzHJ35N9TPV1W72FBteoVgd31JeAcyNUEOe1UKgGH2291PUWJDZJivZ5ik0JuBGuoGZXa05e4HA6CliWMrWwTeZ9oKJZjfPjNtUX0R4cfecd/Ov3/wm/9eWvQpa9PVV6AZs3bWxLvZs2vIwFC85sS93txLtHj+KZp36Oa6/74JS2++gjD6FQyE9pm37Yu2c37r/37unuRiwYHR3Fd7/7Xfza579YJ6xRSiESI9SzRf4SQlzvuhECRlErsfbpkUcexsWXXIrZs2dHKu9nhRgV7nA2zYThbsZLL72IGSMjuOOOX7b3Ob/hDVabjm+9F5p7rTTPv+vlMdPH9ELZ5h+JIXXF2tD18WwOytYAIkxREHeoiVlDwxC++jmQgdSk66IJGVh1NhKrzkYCANd08P2HceDF53HkFyX8w849uGTmvJajngyYnsPjSqUhvy3jHFmtiqquY15ywKs4AMNbt6JrmOmTH9ftnWyhpGsQCTXy9TKjDzMkGRxoyOFb0TUIhHpeF+ccx9QyhhcvwuCqc3Dm8iWgyxcDQ/59joqRX/kE9KMngJ37J11X0BzD33oX/NA7IGedFqpO8eILoP3kGc9j+mu7esIjoheuwUKveBn1CnptbDULweg5/nhjfHqv+1InTxFi5/OrqhoUrQhZoEinUpBECkqAocEUVI2hWK5A1XSjfI/c67jRSi7FXhmnvQj7cTErMpTjGPF45/qPsmsQtI6ctnfSmsenp/WuRX9enT74ySbUvd/U3dR0OE4nwEa5xqmXc8o3blnH+VsiIgCOYrmKYqUKWZYwmExClkQ7lLQgmqklOYdhn8zNGI/Gf3HLMn0v3T6mE3UEcBjlaRTYym8G1D5jZkhNd7TUnhnANW9d4/KJqec2YiwRGOH0wDl0GMQvYxxMZ2DcsCzUdR0aM2uxLA3BUa1WI/Xowx/5GJJJhyLPdas541AUBcViAYV8HsViEYVCHqMnTrTNU3f/vr146MEHcKuDLOg1bIo5/LOFzZs24qM33VznVdkt+OkTj2HN2nWYdWp4b50o2L9vL156MSA35BSiWCziP37wv6Hr8YYjlSQJCxctwsjITAwNDWN4xgwMDw9D0zTkcjnkclnkslkcP3YM77zzdqxtv779NTz7zFN47/uvrbOOVhgzBShnaGxuh4wjhIJQgkqlFGt/FEXB//mPH+L3fv8PjBZDLOzamxO4BrcAG6Wt4Rkz8NGPfsxZae1vhzBcKhaRz+dRKBRQKBSQzWVx9J13YsiF7Y3HH/sJFi1ahEsvbfSqswR+ZvWVe4TyaSIHNJDJIeQG933uHZmje6C8EuyxmLwifPjn8gtbonYnEoq6ivSv3AqyaEFb6ieiALJsEa5atghXffZXAADswBGwN/ejsH4rUnsP++b5dSIliKgwHUmHty8lBBVdh8p0MM59SeWUICLlk8eXcY6sWvUkhwdFyV7KS5SipGsYkRINdSlMx7hSbQghnR9MYuCqyyCuWILTz1kEkpo8wd4MRBQgfOlXwb7xt+CZSRqKBUeBhvbSVkghCWBh3WpfAhiKCrb7AIRzFoeqsxPRS0RdH52FXhpbzmvxMoILChft3O+drstRj6nKcMqpis6g5AsQBYKhgQFQEEiigBlDA9A0HeVyBVVdR5+2iIa+wWKHgtU+7N4p28x/vJ6f8/T+I+0K+M6J7X4ne8XLp8PQN8rrHFhp1bjzN4KHvntebfZ+WqSy7YVMKTRNx2g2B5EKGEgnkUwmIAK2hzGllqMdrau/l2THMDgZr7nXIQLBym2vY14WF+HgVrRTX7f/Zmj9ZXS+wB7WsPYZhtetddjaZ1MWzvasayfEYLWJHTAAtYzhphcv4QbJywwS1wiNycAZDMKXm/sYA0DtCZFY/bHy3VrWL5wDPNoLeelll2N4RlBYRYu0rg8vyDnH22+/hd27dmHvnt3Yu2c3KpX4PPZefOE5LF+5EudfcGFsdXYKDh44gOPHj7Wl7kI+j107d2DVuavbUn87oSoKHrz/Xnzht3677W1pmob77rmr7e20igfvvxe5XC6WutLpNFaduxqrz78Ay5evQCLp7anlFnzHx8bw2rZX8fr2bdi3d685/0wO//XwQ1i4aDEWLV5it0k9Q5XWoiEwpoNwAk2Nf8Gzfftr2LRxAy6+5FJzNq0Rjk6bWrdQqevM/A5YC32zWMTQ+y0vOiJUP5BO45prrvVsz4KfclBRFOzduxc7d+7Arp07sX//vlhDs//w33+AxYuX2F7YrRDrFilMeM1IwHlmK94TbjS77x1p9d3jqL6yw/eYcPpc0NmzQtdZCgqt6/LUiAPq1ZdAuuqSeCttArpoAeiiBUhUqyi+uQ/DooyipiIhCJDOPB3kovMaygzAyLXLOIfoGM8W/eg1wjmAgqZiyMfDFzDI2zTnoB4EsfMrxAHMZTpEKqCka0g7zpfBccZoBuy5jVCYDolQlHUNLDkC+fYbg25FW0BGhkC+9Gngr/7ZMx9z6xUFr1G09Vsg3f7hUFXS5YuNMOY+Oa71bbtAly0ymu/yeavbCRDnsz9ZFUedil5SArtJYGufE275z5KpvGQrP69hLyUtIQQaAyZyeciyhHRShiSIkEQB4uAAEqqGclWBquswtRu1dnvoGcQJr/sybSRUT6Nu5HufwWuhQGusBGmJu/V8Zg7ntro6vCKf+Paqj6mCnw68YT6N4T10z899xA+3TOZGfz5tD5p9yyilLhnDOteUSepmw3pi11mn13tpRJ4ldl0mHQNBlMC5kdaiWK4gJctIJiQkJBGmjx44MXgczgBm8lTc/F+tj9Mr23vJf3300QzeJv3wFzbdlhDN4D7H8l51ndW0Hj8EhlirmxAA51KHc+4j+pkTivlCEW5OOjX+FYzpRmhTgdqFCQRwzqHrOnSdgXMGRWO2B5yu6+CMG2Q3atytO349IdRuyLHX+7rb9p5bxHVjm2ecsQBnnLEA77vmWmiahk0bN+Cpn/00NnLznjv/DxYuWoQZM0Ziqa9T0C7vX2f93UgAA8CON17HK1u34MI14cN9hsEzT/0c7x492tY2WsWrr2zF1s3+uS9bhSwncO111+Hq913TUvh093x5yqxZuPp978fV73s/jh8/hod/9CDeeH37pPrEGMO//9u/4uvf+HOkUqmWBBJP6+gYceedd2L1eedjcHCwQRB15m8CYP+2OkRMJX7NUTWeSBi+3hoR7oFBotfIe7977rVflmWsWLECK1asAADk83n8/OdP4qmfP4lCoRC+My6USiV8/3v/hD/9sz9vuPeW4Oq3IDMEbQ+JwbqHjmuy6/DphzP8tjt0dDM4y3obM9QjunHcyYVqQPhn+f
zloetjY+PQdgWE7mXMU9EWFWTRAsz6tdtjqy8spFd2QhKNeZ8DGFMqmH/NFaDvv9zz/OYjtxHNMjCnQ9Rl+e8OuvZTALxURuG5l5GtlnF6atAIKz2aAY4eB+bPCdFKPKArloLdfiP4fz48qXoC55cTE+BvvWvk9g3Tt/OWg232/k6z7buBW64HMP2KiZMdfoZK3fxMup2Ud6NX3hE/uSbI89ewbyQ2GdtI8NbLTI3K1Vr7jAPlioJKRUE6lcRAKgFKCBKyiERCgqLpKJYqUFUVhNIGObgXnkFciBrJpn8Po8CfZuWcuxYU4e5v4PNwrClhOXXA8Rv+omr/fZk6eN1nr32R38M6B6Q+2o2+Ifj0wytEs/mHPcPaTgC2atBpdFarx0umdu63ZReYJLLjPGqdI1AwAKVqFcVyGaIgYGhwoI4IJhQQTJ7H6B8DY945iMPcgzjQbvK3P+57E6H0QZ2u0LQUysz2pK3tqxfcODiYKYBxe+FjK6RpLXelIIgQBONvDmNS0nSGSlVBvlBCJpfHxEQWY+M5jE/kkc0VkS+UUShWUK4qqKoaNMbBCQUEAZwSgHonQ+82iKKIyy6/An/8Z9/EZ3/98zj9jMmHQCyVSnj0kckp3DoNmqZhy5bJk31B2P7aNpTL5ba20U489OD9sXqTuzE6egI/feKxttUfBqqq4pGHfjTpetZddDH+5M//L3zggzfEkjt7zpy5+Pxvfglf/NKXMXdeOIW0G9lMBj//2ROT7lNcyGYzuPOuOzGeyWI8k8VENoeJbA4ZM8R9tVoFYwyEEIiiCFmWkUjIkCQRoihClARQgYBQTJqk9lKq1XtaTK5+q273Zn0XnfvcIIRgeHgYH//4zfjO3/1/uO32O2Ixxtm7dw9eeOH5hracC4Fmm+d1wvgmM86hm5E1WirnMmRrdr67bKv968MfrFCCtv+I7/HE+StC11l+upmhVYyLroEU6Fd+FUScntQLvFQG9h22fw+KEuYm0qDnhb9vnQCSTiF9zlKcnqqnh/2IzqkAve49oBHyUIeBvnFb6DLiBSt9j7E9B4CqYv/uz0199BGMXnlHwng0hTHMdAYbC6qPCgIIFVCqKBjN5JAvV6Eyw6BephQjg2mMDA9AFgTjS2zJwl2uC+kU9OXRsCAw1KCNhqnW2GzfyCS1zfI7Ydxa1Fh+KA0Gyn10PsKuY4G60dD3/u1Q9N/BeBHEg7jfA8sJo3Z6zfu3FT7F09nC3E+45XxBIIgiGAcmsnkcn8giX6pA1Z3OFcYmCAIEQQAhJJbIiX30MdVoIIDdHyk3oeoFg07lYBxgnNRtHNTerN9xov7DykHAQaixcegAYeDQwbkODg0gDIRyUMohCgSSuQmEw4iyTKCpDOWKikKxjIlcAeOZIsYzBYxnixjPFVEoVlEqK6gqGnQNYJxCN1oBIwScEnBKjQVRgLdQL5DAgBG64cI1a/EH/+OPcd31N0z6mja8vB6HDx2Mp3MdgB2vb0epTbmTLaiqile3tj/3YbsEoGw2i5/89yNtqRsAHrj3HqiK0vzEKcDzz/4C42NjkctTSvGpX/0sPv2Zz2FkJH5P+RUrV+H3/+iPsfq88ydVz7PPPI2JiYmYejV5vPj8czhw4IBNFOqMQdcZFJ2hVFWQK5YwkctjPJvDWCaL8UwemXwJ+VIF5YoCVdXBGEAjfMOcBkZw/O1JykaYPi1P2FY2u08e33ZnPxKJBK6//gZ8+//+n7gghrD8999/HyqVSl0brfa5mUdtkBex+x77fXetBUfQwr1ZxJG+Aq51qG/sDjwuRyAyy89tDD4hLnGLENAv3AEy+5SYKoyA13bVe40Ahqfs7JnT058YIF7Q+Mz5a7umoSc10M/dAnL63EnVETQd6JtfD12f0MQ7njsMA4z2u39O6oVrsNAr19Ir1wH0zrV4XYenoSHq5SZCCKi5EYcnDgBQuCOVNdbnjsDCGFAqV5HJ5ZEvV6Fzw8BSlkQMD6YxYyAFWZLAmR48QfYRCX0jxRDgMPNjAIQTEE4A698ppOPq1meMgesMgBHB0CCD44tE1cc0wXqYDiODProH/Xk1fvjJE5ZMUtNvWccaywTpdqzjXr/tNhgHYQB0IzIKpQLAKQqlCsYyOUzkiqgoKnROwGGwwFZ7FhFs1dtsTLSD9+l7//YRFnWabC+vGMDPUwfgnJgbBWek6bes1VCLYTZHSRDTIoTrDGAcAqEQqQCRCkY+GipBICLAKJhGUK4oKJYqyOYKmMjkMD6RRSaTRTafR6FcRllRoeo6VKZBY7rRBiHghAOmJy+nhnWs9+LK7Jf5r7UzzETRTaCU4kM3fhRf/NKXMTDoDvIXDo889OOYejX92Njm8M9T1U67x+oLzz2LI4cPxV7v1s2bsGunf67JqUSlUsGTP308cnlJlvFrn/8iLrq4vTknZVnG537jC7j40ssi16GqKp547NEYezU5cM5x/713+3pJuAVKxhl0XUO1WkGpXEGhVEauUEQhgqc9ASBQgBIO8MacknXftGn4JHgZflkE8cDAAL76O1/DJ2+9raXwx37IZjJ4/LGf2L/DCpZ+3sLWPmfobi8ZwXlN4NxQsJhbKwvxKFbd/YWiP5Sd+3yPiYsXgA6GCS4MsHwR2sG3fY/HuYyhN30AZM2qGGsMD+ZBHNJ1505DT+IDXeeRxmLPIcPbebqQkEG/8hkgET3KBiH+7z47cAS8GPL6Tp0ZSPTrew6Eq6+L0J9Lpw9+973bn4dbTugF+F2HHxFc9xsOQtg0Zm+lrKHqYGb0NCtcIkWhWMZoJo9CpQpuevHIsoQZQwMYGRqCTPtKxnbCSy7uy6Wdy8NxwNYxNhyz9a8GIWyRwif7s+x0hNdp99Hp6D/D+OEnY9jGaQ3n1zbLK9hyvrNkF+KSY6w6GxuH4cjHTd6GMBAigFMBFUXFeDaPsUwOZUWHphvP2WjH39HC6/q6Bd3U1z7Cw9bmcs7BCcACtM+cc8vHFpae2jrbb9C7lbRe+537ANhErsMvCbZ5HmemAp2BgIMSDoESiKY7vvEySmCcoqxoyJcqyOSLGM8Wap5d2Twy+QIK5QpKVQWKzqCDgFMBoBRWnE9LqHJ10vZGCjPhG+EF6n/3igewGytWrsIffv0bkwoju2f3m20hA6caxWIRO94I7+ERBfv27sHY6Ghs9U21UMMYw3333BVrOI1SqYQfP/hAbPVNFhvWv4RSqRSprCTL+K3f/grOXX1ezL3yBqUUd3zqV/Ceq98buY7NGzegkM/H2KvJ4e23juDZXzxdty9obNcER2OjhKJRBG0OzjkUnYMREVRKQJRlyAnJ2CQKSaQQRWqu98O/axyAbm7czCPv/jQ/344AACAASURBVPa6/w777bnhhg/hD//o60gkEqH7Z+HnT/0c1Wq1rh9hCFT3ua0albnBOK+TXzhge4V7EcmttNHSuW1aMHbjwlPZ7Z+rV16xNHx9r78ZeDyuu0PWrAK96QMx1RYNXNOA1xrzJ5O1HgRqN2H+HKjzTq3bZVzr9HoBk9Pngn7ulujlm3wzeEAubD/QpQt9j+lvH
vRup8vmiCD0wrV007zdrJ/ddC3N0CvX0ky2Dfpt73dsrdRFCQHnDJwzCAIF5wxUoAAhKJYqBhFcrkLnhq5HEilGhgcxPJCCaIRgAWBqfJzRaSwdUB+xw0mE9sCwd8BjvFiCPwNsReYUw7n2aGV/0/ocpHBvPsfORdNZKeTD6BOJ3Y8+KTx5+K2ZrOgkbpmkVX2WbwQ4myg22GRLF6RxDTrTwamR4kJnHNlcASfGMxjPFVFRNTAOgxsCqSOdOSdt/bz0x1Uf4VDvNEvrBhD3funqJzDztQupPHa2Y6W54CB27j67Da6DmyQv5RoIdNMQjoASCioIRp4ZEDDOoGoM5UoV+UIRmWweE5kcJrJZZHM5FIollCtVqJpuvMym0tc0b/UIM1m7tNrmHTrTJqsDSdz6rA4kEnXQnZg5cya+8Ju/PSlP4GeefirGHk0Ptm7ZBE3TQpdLp8N5P1nYvKlJCMwOx5HDh/HCc8/GVt+jjzyEfD4XW32TAecczz/7i8jlb7r5FixeEp4YmQwIIbjp5ltw1sJFkcprmoYXX3y++YlTiMcffRQTExMNloFB4ZJtC8OIEcE4B4qlMnL5PDLZHMYnMpjI5JHPl1Aqq6iqGlRdBwQCGiWnKAco56Ccg+ua8U01Pugw/nJ4v6ImLLu/Xe7vmvteLF++HJ//whfD989EPpfDy+vXN3Y/gBh1e5+0Es4wqH4nwQvADs/t/sa7Q2Pb5Zos6J1ttboo8etbq+hGYzLtTX8vxUgE8PZgAjgOkFNngn7hjtDyb9xQdx9AMV8AAEyoVYwqZZB0EmTJmdPar1iwdhUyarVul5e381SDXrEW9Lr3RCpr6gd8oUUguMVl/t9ktmuvf196SGnQK9fSVxJOL7rt2xkGzUhgv9DOfuEYvcrV7XOVr1PYEgLGOIqlMiayeRTLVTDOARDIooiR4WEMD6YhUgLCmB1xhpvn8JNGgxIv6p8V8di8jUS7f06qj1hkX4/5j6WTm+pRZbXHXVscSWDr1zq8Lmx0H/EjSPvrvPFRHmufSOxshFl3959jSPjwL0BzOSWMTNPYbM3bmBIKgRBQGM9P56YujRipPsuq5RWcR0XVoLvqFoSazGTtbwfaUW8vy8QnL+rnnToC2PpANXjm2ltz5aq9WaEVHefYjRLUPHk5ByW1JNpUkMzk2hIEQQRAoOo6KlUV+ZKRk3d0IocTE1mMZQxP3nzJCNes6AwaNw37XMrcOOEUkqMqbE8GnDp7Nn7981+EKIqRyr+yZTPyuc4g76JiU8SwzLfcdkekchtfbiRX/OBHuky3gPKT/34E2Wxm0vUc2L8PL77QOeTjzh1v4MSJ45HKrlq9Gldc+Usx96g1UErx6c98NrLn54vPPRurV/dkUa1W8KMH7p3SNi0CtraDQNcZKlUVxXIF+WIZuUIZE5kCcoVoHuLOb5FAKcBhp0NwmyEyl6drGKxduw63fPLWSH0EgJ/97KeNMoaTkHXNQ1Y/G3Ilo7Vvu1v4DyKJ/dBAGKP2/Xf2y6tc3bU0mV/9wlxP95wcN7SDb4Erqu9xecWS0HUqr/vnFI7lziVk0K9+BmQgFUdtkwJ/bSdkUzmeEkQMCBJw3oppJ6bjQPKCcyGZUQw067vx2s6OcGkht98IGsE4AQh+NGx7cD5sL9Agsr9cBT/mHw2ml+aTXrkOoHOvJUy/em1s9cK1NLuOVhStFqhLoeolX/krW02jOEKhM4ZipYrxbAGFqmoaWXIkJBEjw4OYOWMAgiBA1021KqVgfQJ4kqjJlw1HfBTiNfnTsn3soneCA+DEWAsZisFOECNqaOTh2wLOOXSdQdeZSQjXnmcfkwMBbIKo5sjUvnekNw01Tj706tq+XfCVUYC68NDE43wv+aRpvRaJCwoKYpsSMRjRSBjnACVgACqahtFcAcdGJ5AvK4bKzfFIBTNCrRuTHQPdaPjfR+eANlMY1QalERbZud930HIrvkpN6V+/gOAQBWpuBJQYiwJd11Eul5EvlDCRM2Ktj2YLyORLyJcrqCqqvRhwx3d3thPkJRQHvOrtT+TeWLL0bNxy6+2Ryuq6jte2vRpzj6YOx48fw8ED/l5Ofph16qlYu+4inLVwYeiyJ04cj9RmJ6FSqUw6bLOu67jv7rti6lE82Lplc6Ryg4NDuP2OT8fcm3CYPXsObro5WgjMbDaL/fv8PZKcmCphZvu2bVM6txCH4oVzDg4dIAyEMoDUzKuifrMIMfI2JxIJJBIJSKIIWZIgiqKtsLPad3u2epGqzb5lN9zwIVx22eWh+wkAb711BEePHg1F5noRxn7EcFC9YeWCsCRsGKOwKMJ/r5DC2p5DvsfIyDDonFmh6uPlCrQDb/nXGao2bwi/ejPIogUx1DR5CFt2QDRJ0iQVkBLErs//a+PsszA4PAQAyKhVlDQVvFQB33d4mjsGEFEA+dKnQUaGwhcOeFX5eAZ8PBuuL2cvDG7uyNHmXerS+aOX0X8mfbQLrRrMNT0HNcVrmG+rIXsBEGppthjnKJXKODGeQbGiQGeGLCwJImYMpjFzeAiiSAFNR4TYOCc5ojKM3l7CQBfKoJyb0Q3bzrO21h3EZJA4mT7wLnyOfdTBa03cf4bdi/572Do8ZRQOEJOqdc/zfgZPLdULM1KEpTuCpbIzHR1BQIgAEAJGgFyhgBNjGYxl8igrGjihtrFNK/qnTnj+fVL55AAFqRdGWlVc+u3nxii3CVpKuL2B6+BMQ0XRUCxXDY+nbAHjGSMsZi5fQLFSRUXVoOrMjqtOAQjmeAyyPLVghXq2wxZR6u7qpOD34e2EF7cTcdkVV2LBmWdFKvvqK1tj7s3UIar375q16wAAa9ddPKXtdhJe3bplUrmTn3nq5zh69J0YezQ5aJqGN7a/FqnsNdddh6Hh4Zh7FB6XXn4F5s2fH6nsa9te9Z2722msE4QfPXAfqpXKlLVXf60UhlEVNf+u5ZWPchd0xhw57nPIF4soV6vQGAMRKERJhCRJSCQSkGXZtki0jKgsuD1ag4jVW2+7DbIsR7oXmzdvAtBozez2prX+9TO68vvbWX6y48pdt9t72lm/37W0Sh5zc1FhrRha6Xk3KnKUA0d8j8nnLA5fX4D3r4HJ3RPy/stB3nPRpOqIDUePG5sDRBSB85ZPU4fihXEtKwAApyZS0M1nx7dsn85u2SAjQwYJHDJUf7N3me0Ob7hHF57hX9/b77ZURzfMF83QLfNeq+iFa+mlZ9LN1xHlOQSGeA44t+Xwi4yBUiv9iqGfYURArljBRK6IYtmQXUUCyJKAkcFBzBgagCT0FZPNUJMD/c+ZnExsqNeN+o2/vdYN0wE77C4zQgFy1hnvLXds08VE+3t417ZayOjuWUtMF+rWXA6j6ql8rNxjs9IdWlv/CXYPvIzw+++hN4L0htQM7k9RH/UtXLQSa4exERAIsMJCU4iEghIKAg7OdcNFkhCAClBBUNEZxjJ5HB/LIF+qQNPUhjYtnsx9XWHuQR99RAW1LBkAa/Jh9ZNQLcCFsZ8zEM5B7BDOOggYBApIouHVSwiBziiqCkO2WMFE
voQTEzmMZQsYzxVRKJVRripQNM2IqU4IQCmIINReDCd5S4hpZeE/2J0vbVir1LDwmjysze0B1J+8Ddz40Y9FKrdn95soFosx96b94JxHzse7Zq2haL5gzZpIE7xf3uG4iZF248H77oWiKKHLjY2O4onHf9KGHkXHnt1volQKH9o3kUjg0suuaEOPmsM9t1FKcfV73x+prm2vvhJz7yaPifFxPP7Yo9PdDQDhPVP9YH1/qqqGUqWKXKGIiWweYxNZmxzOFoqoajoYIRBFEYlEAslk0iSHRQgiAQgDh25sHosRxhiGh2fg2g9cF6mfmzdtsucgv++ptc+C2+PXLwqHs5wfEdtsYeWuK8hYIaguP2MxP4WZZUBnLeZZwLlBvh2dvoDUArwTpbNOD12f8pp//l/THDB0nRbIogWgn74pcvm4wbzyxa5aCpKe/tDUcYE4vJmHRMPI5OiL0eSpdoCuWApy+42hynCCwJiLegQCmJwxz/cYO9IaAQx0N8HlRCfOdVHRCdcRx/3shOuIA906tvzklTBl/Uhdex9QF4KR+siyjcZyHIb6h4ASwzFYZzoK5SrGMgXkK4pZDpAFipGhAcwYSEM051LOYaf7qpOjTirao14SJISaW3vW995jombIWiOFuYNcbN+74yQwrTDPQOP6IW5YJrwN/TG74Nw6xv24RbhJ4U5dR0wbTONce3PMfdMNr3Uxc239p9iZaOYZ2slr+ukEIQSEEhAKEFoz93Y6Ajp1QpbcEaR/8tP5EG6EkyCEgHJAAIFIKBixjDC44SJMOCgl0MGRKRZxdCKHY5kcSopq5hJmANFACAOlRlRbi3MzsraRer2QdU1tfvadzgv0MRkYXK71hvi6xtqTDBgAHQAzPHkpARUIiEBBqADOCaqKiny+iEw2j4lMFuPjE5jIZpArFKAoSh0ZdbIMLkvJC5w81xyE5StW4uxl54QuxxjDgf372tCj9mL/vr0YG/XPweaHufPm4fQzDK+OkZGZWHr2stB1FItF7Hi90VvGaaTgHJ+dirGxUfzsicdDl3vgvnugRiCO24k9u/0JiiBcevmVSKXaq9gP44277uJLMDA4GLqNzMRE5PzHrSDK3AIAv3j6Kbz1lr83YrfD/UwZY9A0DaVSCdlsDqMTGYyOT2A8k0EuV0C5XIWmMlAiQpaSkKWk7TUsimLNyMq0WvzgB69HOp0O3a/Dhw+hXC6HLucWhKPALUB7LazqFwuTbyPoHC8hv9n5rEUr76lcOISBdsg/OoN45mmh61N2+HsAT1b6IheuBDK5SdYSH/grOxr2kR7x/rVAzltuaP0dmDteADsxPk09cqFSBebPaehjMwT59LM9B0N3gwa8K616APfRR7vRKd+dONBL1xI3vLyG/c7z14vUFLLFUhmjEzkUy1VbKSpLAkZmDGF40PAI5roGzrktk+qTNPjqIz7UZFb37xjeIcNfxeVeO3WoT3LX26gj2R3bSYVpvnAvb9/IdbnWjtPw+vQRA/pksA9sy4zG0W3IHfXmGs2M1YCa3OdnDEdRn4eYwDT+cZynaQxjEwWcGMsiX1CgayI4N0yJLBmGGtSvac3U2Jc4HEX66ANwEMC1CcTYZbunmyYVTAcURUWpXEGuaJC94xM5jGdyyBfKqKgaFJ1BZRzMMK2ok8Gd5NPJhlZzAvY6rr3u+kjlupEA3hgxDLM77LMVDjpK+0Eeb1P9EZl5yimRyj3z1JN492jzXHYWtm7ehJ073ojUVtQ+toID+/dHKnfV1e9tek4rlmvNrNpahSzLuPyKKyNdSzvf45s+cUskopwxhvvuvrPjjSHiRINgC0BnHFVNRamqoFCuIJMvYDybw3g2h0w2h0KxBEVVAUIhSCJE0QgpPTIygmuuuTZ0Hzjn2Ldvb2hvlKD63GRqK/UGeQa3Ssx61ensr5/VrjtyiNs72E9u8KrXL3S31z3jETyi4wSvVMEmMr7HxbPCE8Dq7oO+xyYbD4b9+KfQf+cvoX/tW1C/fzfUZzeATxMRyUtlYFfjt4T2GgGcTqG00GMceHk/TwF4uQL91Z1g9z4K9ud/D+0LfwL21/8SWhlIAtRs/HD4lBV0gX9KBn7w7VB19ZIiqZeuY7quJe52e+WZAL3xroT95nutFwI9hH3OCVp/GHonYwOM6HAcBIWSEUkuX1LAmEHCJGQRI8ODmDk8CFkSwXQdBIZHTl89Ov1wP2c3d+Ykh8O8Svb5Zpnpdq6Ni5TrFtSvO7jL03u6ezc5eD7DabiwOMnepm1Z3wD3WrDN7fYRL6ZrPd+JsCioumiwHHW5fBvLeEegc/7tPtcJpzewAFLnXWl/AwEQSqFxIFcq4fhEBuO5IkpV1TAmIsT2HBaEWhQPP/1RH31MBpQxZrrPCwARwDiBpnNUqiryhRImJrIYH8tgfCKLbK6IfKmCStXM0QsOIlBQUQChtM4TxbJmcBO/vT5oG4XeeoX0yTwpLz37bCSTydDlDh440IbetA+KouDVrVsilb1w7dq63+dfcGGkHNY73ni9o0Jn33zLrZHefU3TcP+9d7d0brlcxkM/ejB0G4ARavnDH4kWprwZNE3DkSOHQ5ebO28eTp09O1YCNw6sPHd1pHIH9kUjwVvB8NAwPhwxzPyhgwfx4gvPtXx+t8/flnGX28vVykdiHbOIRZ1zVFUVxXIFuUIB2VwBoxMZjGWyyBaKWLn6vEj92Ld3b+hFi9/Yb/Y+OIlSPxLWS/h3l2uln0F985MJ/K7R7xqs87yu06tMQ/2ePa8v065xru0P9rgXTpsbqj79WHCkjbiugp8Yx8TTL6D4vTuhf+1b0L/2LbB/vQ/sxS3g4/6Edqx4bRe4O73D/DnG1mOQ1zXOK3zr61PTeFUBf303+P2PQf+L/4XRz/0Bjvzl32Hsx4+B7TsE6NEMhgK/14oKfmwsXH0BBDAA8LEpGpcdiF5aa/XKdfTR/Qgib93yE3VsBN6K0yCFrEgFW97RGUO5UsFoJodSRQFjxjmyKGJkaACnzBiCIJC6fJy9B3c84e7Rp9Wes3f/A0lEDhBOAGZH1+wcdO8jiQ29Qj45Hx/3HYy9iQbS+SS7/l5FN76HccKWQ2j91NxMZ+Slw3HqzbzOqW/TzBNc52kMcOhgXAcRjLDRxWoV45kcjo9lkC2UwbgAbvoBW+0KjvSo7rbiwnTpk/uYPojFUgWEEKiqCp0DjDMQBjBwWMHUOQCIhtBFCQAYFl9Gzo/ax99p8Vm3/yQeWM7rdpLgft45vQxRFLHsnOV4bdurocq93WUhWl/f/lqk8KZnLDgTc+fW53QbHBrCOctXhPZq1TQNr2zZjCvfc1XofrQDZy1ciMuuuBIvvfB86LL79u7BhvUv4ZLLLg88778feQi5XDZS/z5040cxMjISqWwQCCE4duzdSCGply4NH/57KnDWWQshSRJUVQ1V7siRQ23qkYErrnwPNqx/CYcPhW/n0UcexvkXXIjh4Rmex3tJiLavxfpmm98hyxjM/c12Lubt46ZRiqZpmD13HtIDAyiFNDg57DCK8CIuLfgJ2WHQKjHarJ1m4yC
of15tevWh1bHm9Fr3kjOAemWG/TzrGwxsox0Wp0GErTB/Nogshavv7WOT7VLLGBQlyFQAYBDC/NkNwLMbAABk7iyQ5UuBlUtBViwBOSX+70n51Tcgu/ZRR77cXkLyovOh3P8YqHPcvbEXvFSOPd8xV1TwfYeAHXuBnfvA9x6qI9pnChJmpiRk1erk2ml2/K2jIHNntVwfmRU8xviJ8abnNJTpMWPdXrmeXlgz9sqzsNALzwSI/zq85Bi3I4Cf/OXczzmDQIkdOp9zgIEgX66iVKkimZCRTsgQRQGiQDEyPARV11EsVe3UY73wfLof9bnnmsEtAlMOwJVAgds+VtND+POTeFgFzeNuL2/rmcexjms7LONcnJQ8fgNsItgB2qnPrg9feOkUOvYdbCNMysq8H/XzUdA9cuvALLjlJs86YHgGgxj51AkIODXLcAJCBEAAdM6RL5ZRKFeRlChSqRRSsmSoaDg7KZ9XH+0FzZfKKJQrUHQjpxzMwUkoBSEAoWYoaAjGQIURr9wify0EWYX2YcCtVHd6FZ0sWLFyVegypVIpEqE6XdgUMfzzmnXe4Z7XrLsoUn1Rw1C3Czd+9CYMDQ1HKvvIwz9GsVDwPX7gwP5I5DIALDjzTPzSVVdHKgv4exFa7/rYifC5oAHDY74TIYoiFi5aHLrcaISc2GFAKcWtd3wqksd8uVzGjx98oG5fN1sy+6Hue0xIHQEYZF3Y4NVqbYRAFAQsX74idF/GRkchSRIkWYQoCRBECkIBQjgYodBBzA3QOKCBQ+UMmmPjXAc3s3Fxc7PgFtqbbU5YIZWd8BoHzjasb7qXt7C1z8tLppklaivEsd9YdVp0O3MG2/mfrDKmMYBh3+fdVlwW/vqof/hkYc6poevTjvrnFo/zzeUABOI/t/BjY2DPbgD73l3Qv/IX0H///wH7wQNg67eCx5FDmHMUXnmj7po4ALa6t8I/2zhtDsZmpFDRa0Qs1zRg5+RTCXBFBdu5F+xHT4B967tgX/wG2Lf/Ceyhn4Ht2tfoZQ1j/B8q5fHg23snTQT7gUUIA00CvL/5aDiPYq/5otPh5Q3mvIapCKM4VZgKWWQqZJ5ekqt66TrCXkuQ7OK13+kNTB1lGWl0GLDLEmKLqoRwEDAQcDAOlCoKxnJFZEtlqMyQYRICwcyhFIYHU5BFCs44GCPQLf0W57BcSZvJVdMDL5fSbnctZeC8tVRoXnKmjsZtKrPw2t8QUttOZoRz6jHGruVQyjk810i152lt3uutsGDE2LxK2usdl7frVD7eBs/bKWw7CgLXkX10DXrBYz8KbNkDqKUIBsCJQdIackYjj+WMjufcb8HP2dH+zQGBUkMG4uZcRrgh0xBu6L4EAg6Gksowmsnj2HgW+ZICRecO/ZbRaUIsvczk5ZY+T3dygvYf/NSj3sKV1/3b689j+YqVkcqNjbWXPIoL+Vwucg7aNWu8CeDzzr8AoiiGru/QwQM4fnzqvKOaIZ1O46abPxGpbLFQwH898pDnMV3Xcf/dd0X6CBJCcNsdn7JDbLQDUcfu4qWdSQADwOIlS0KXKRWLbTfkWLDgTLynhbzJXrDyR59MwnBciDKvHz9+HOPZHMYmMsjmCyhXFXBCQSUZAwMJDAwkkEyKoJSDEN2hPeB2vhXGAM4AzgWACwCjRghAB6EZ91vtRx67FwNeBLS7nrAIWrT5eW179dsNxjl0xmqEiaO8e9Hj1Z8w0II8gEN4P9r1HfHPER/nsx+tllFlesvn83ePgz29Huy7d0L/7W9C/6O/BvuPH4Ft2AY9mw/dPt93GKcUDeKxaJKixyiDsGxh6Lq6BbPXrWkgIVmEPMBc06Dt2ofqj5+A/j+/10j4Ks2jWXAAf3XwVfze68/jB4d2hO6DXU/A69IsnLkXyKkz/ds6Pvlc1Z3+LQxSxtfNWx1+Ha2i059HGPTKtfTKdUwGzYhgt+KUkFoqCgE1Bau7rPu3M7UYYNz7UqmCiUwOuUIZOqMgoEiKImYMDmLWzGGIEgfXNTDz+80YAeN9h4WpRDMZOKwsOVXEBe8F/r3jUE8IGxsB5wSWk5H7hkd9zpQbm9fj68/b8cBaNzrTQPbRfTiZdF+GgyNgiBxW0GXAmimCjFy8jPj9HE/c8g+nhpOln17FkIsYRJFC0xkyuRxOjGcwOlFAVTM7Taw5EqDUNKLr6dQXfbQDDaxSr4Q06mT4hRvwCzPQS5h5yimglIaerMZHR3HGGQva1Kv4sGXzpkgT8aLFS3DKLG/ldyqVwoqVq7D9tW2h6920YQM+dONHQpdrF9ZedDE2vLweb+7aGbrsyy+9iIsvuRRLXMToL55+Cu+883ak/vzSVVdjwZlnRSrbKsYieL5KkoSZM/0Vu9ON2XOi5ZwcG2v/e3zDh2/Eq1u3IJMJn//wgXvvwR/9yZ9Clt2BVvsIwqxTw3tulstl5EslDKRS0HUOTVdQriiGME1qOYkFQYQoipAkARSGB4ntaUsMS3LGVcDMtmJEi6vlySXcCLtj/fb7xgaRtV7neSHo224d98vh6xUCMQyC2g06x+94K31tVq/XfdQDSClx3uzA/nlBeyfAA5g3jXLdMmQqYPDGa4DxLPiOPeA5/4gUXtDeegfq4beAJ56FCo7hMxeArFwCLDdDRg8PBpbnW7YDACaUChSmg3GG+VdeAhLBOKxbIKw9F8mnXgTjvBZ6busbwGduDnywXNOAfYfBd+4FN0M6v5udwIiUgCC0fr9KmoqcpmBuIg2+bCE+f9lvYeyuH2Dl4CmTvTRvjE6ELkJm+/eFHa/3AA47s3DXC9Spq5JWrssybHEST31MP3pF59C/DgN+MpDXMUKIbZjBg86Bl4zh7CcFB1BVVKiqDjkhIJVMQBIoKGOYNTQENaWjXFFQVVWDqGBWq8Sc4iwZMfKlx4DuHz9+8JK7J6O0dhr+uGkn0vD/ydVvtNHLT2f6Yb3e9e++9W88aQQ559P+DHuRXnMbcjCPbwiB93qwj86D1/e7l58dtWWM+veTA0YEEvjfEy9ZxS7vItS51YirDutc69+a7oWCChIYYaioOpSJHATKkZITSKVTkIRafVEiH/ZxcqN3NUcdDq/J9GSYdCmlGBgYRD4fLiRiuVJpU4/ixaaNGyKVCwrzzDnHmrXrIhHAWzZtxA0fvrGjxtEtt92Ov/r2X4bOIQsA9997N/7w69+wPaLHRkfx+GOPRurHjBkz8KEbPxqpbBgUi+GIAgAYGBhoQ0/iw8BAMFnhh6Aw3nEhmUzh5k/ehn/7l++HLjs6egI/e+JxfPgj7R8XvYSood2VchmD6TSAmnLFWECaAjBn4DDnCVILOS2KIgTzX0ocZKVDicCY5SEA2MpCzm2lgvG9rSn9vIT3VuZNd0QPN8kcdNzZRivf/8nM415ttSpzNCOOgzyE3dCPB3gARwgBrb/jH+WC+oR+i4IZkgy+5XUI3/wqMJgGf+td8J37gJ17wd/c35QQFgjF29UizkgNIkkI+Nvvgr/9LvDkiwAhoAvmA8uXGDmEly8BGUzXla++ugMiAIlSnCInUWV6z+b/tb
FqKYSBFA6NnsCC1KDx7kxkwfcdBlnqMNzSGdiBw+A79gK79iH/xpvgVQWDgoSMWsVMEQUAVgAAIABJREFUOYl5yeBvqkUyv1spQpRlzF6+DIlzFmH+yrNBly0CSch4738+jPPPvAgjUiLyJQWNSR6BAKZzZsHPL52PZ0PX54RzvrCMazpFloz6XveCkW2Q0imueqcSvfBMgP51WHATAu5jnuPXQQR7yURBxnswQztzGMlAimUFpaqKlCxhIJmEQAFJFCANpqBoMipVBVVFMfx+TBnRqpIxS05qP6FvXV63j5ewaLvXrvV/bo0n60gwKczrT6vDyfWEOgHcfj9q70ntaKh3xpo3YupZH8HwXEPCRcCT4Hexj85CGCPvbgUhsNc53BU1wE+m8ZNVnKg3PDVkFeYyW/LU9RAOI3UCQAXBcHrQOQrlCvKlMpJJGelUErIoQqD1RhatfmN76fn1EQ59ArhD4VQU99oLOjQ8FJoAVpT25FyLE+8ePYojhw+FLkcIwYVr1gQeP3f1eZDlROj7MDY2iv379jZ4zU41nD4Xc+bMxbXXXY/HHv2v0PW8e/QonnnqSVx73fUAgAfuuweqokTq08dvuRWpVCpS2TBQIvQvne5sAnhwcChSuSj3IgrOv+BCrDp3Nd54fXvoss889STWXXQx5s2f34ae9SYGhyY/HuqUe8QQegkFiKnE4czw1NB0BlUz5kHCjXJUIBAECkkUkJRkiKJkfzc1xqAx3c47hTqlm6VhAKjLA7jV726Qx4vfosCPBHaX8cor00w5G7Q48ZInvEhqd9utehK3cs9YQD5cISCcrR+CwubGoWZ8t1LEi+NH8Z5Zp2P2sVHo3/k30K9/EWTBfJAF84EPXAlwXkcIs517gUKpoa4RKYEJpYJZCdd3h3MU9h9C4tBbEH72fB0hzFcuhT5nJnD4KAghGBKN6AQJQQTO69H8vyaIKAIrluK0TSVUmY6k6b3LX90BDm4Tvnz3QaBSk40GAUwwBiIauR9zqoKEICBBBfuciq4hKYgoairGdAWnrVoJcdXZmL98CYRli4CEDMHRF/bT58B++tykyF8geEzyKCGgRwKMb/LF0PX5oVc8oHpxTdXN6D+P3kaQgtSPCA7rDewePgKhIByoVFWUK1UkEgnMSCdAKYUsCZDFNLRUAvmSAlVVfQ3iLA/VdnjXTJOtxbRjSo1MrCFVa93c7TPfWEYAMMIGO/f1MdXwMkR1/l37bgR+PvzWKrzRy7uP9sF+7x3zPHH+7qMr0Yvymx0QhHuv10LLNBYcsg015zcjq693GfveciMvOqWGHozDYKpLio5yNQ8BDAMpGUMDAxAEwa4nquF+HycHGgjg/oCYPvjde8YYKKW+XkXdhihkQbXa+QTwhpfXRyq37JxzMDw8I/CZyokEzj3vPGzdvCl0/Zs2bIiFAJ5sGB5n2Ws+cB02b9qI48feDV3PE48/hjVrL8Lhw4ew443XI/Vl5apzceGatZHKhkUU0nNgMJqH7VQhPZBufpIHpooABoBPfPI27N79ZmgDAU3TcP+9d+PLv/O7/e+hDyzlCAMAYuT3FgQBut56jlSgfl6vu9fcFI+tNSMMMtg8WFszEkOZwzgH03Somo6SGUaaABAFAYIggAoUsiBAEA3BmXNqegkzMK6DcwbdzKliLExrxleW1SYhhtDu5UnrNU6ahZH2874N8iB21+FXvpW+uL2Rnec6FaLu0NheffMjmG0FjfOcon8ecDI0UFdfs/dPfzs4x72D4o+Mbx7ahscPvolvr7gMN85fhOE9B4Dv3QXhq5+pKS8IqSOEKefgh94B27UX2LEXfPcBoFBChWmYIXqHl89rKigxDBk45xjdux8DB49A+ulzoITUQiBbGEyDP/EcdHAwDogtzlUMHFVdh0wFUEJQ0jXIlEIizZXcHIanrEAIVMagcYaUScqWdA0CIXUkayyoKhAptY09CCFgDz8JPPxkYLGZchIA8HalgL89uB1r0jPxm4tXIymIOKpUkF56FtLnn4vB5YsxY9liIOVP7LJN28HufASAkQt6REpAjEgKNB2T+SIwNNCyEajbU7yu38VSrCEHnV4cU/JtdCjh47oOQsjUX0cb0e1ep54Kry5HL10HEM/Y8pM1PNtye/4G9MdzH7idO54QCkVRMaYokGUZ6WQCokghEIIZaRmMy6goGsqVKhjq+AnAzE3slnPq52X/a3LdAdd5Pqf1IDpJX8U5h+7j3W1J+9T40UeHo/ZeGr+JydyQFqSFOMlfP+O4zhn1048gr+C68wLO76MTwFEb8c29Tbv1OVoksKG7MOcY16U4r0032eJaOGkPUtch21jHBWLcx2CvYForQ2qygxUBWucUubKOfCUHWaRIJWSkkgmI9hLV7JPjEoz2a3u69Tn1ER19D+AugTsXXze/rMlEMnQZXQtHLEw1GGPYsili+Oe1/uGf689bF4kAfvWVLbj5k7d2VF5TURTxydtuxz/+/f8buqyqKLjn7v/Eu+8cjdS2JMu45dbbI5WNgigeyskp8EyeDFLJaP2LEvY7CjjnOGXWLHzw+g/hvx95KHT5fXv3YOPL63HJZZe3oXfdD476BTQhBIlEAqVSo9djEHRdi6U/XtaXHICq61A0o40C5xCoodCT7LzCIkQimAo9ZhDJOgPnDIRSaDoHNwVuxgANDKIpwxueyjXBGgi/eG1QbKKWr9ja3yz0UF19IRS2fiEVgwhnL09hr3BHfv3luXxwnwbTLbVhgZWDU0PEoYA5Pz2CFxJJDIiiHWKSbdqG/A8fwMhnP+ldiBCQhadDWHg68MGrDMvfQ+9g7q69yGx7A9h7GIly/XdhhiQb44xzKJxBpkIwmZovgv34pwCM5WFOU5EUBIgOItcKa5xRq6gyHXMTaZR1DTrnkEUJHIA1k5d0DRm12jRUss4YQAgEQkA5ByMEJ6plzDRJUb+sflWmo6SpNjEbFowDR80Q2mGwLTuKl0bfhnLhPHz1I9eCrlyKBU0I37p29x4E+/5d9sJd4wbxLSIaAdx0TBZLwNBA6zJ+kDGWhxd6HOCAYSjT5nVIuz10puo6pgKTIR07jaDplecBdPda3UJcz6TV8IRBMkWr7VjyjV2UCqhWjRzAyYSMVMLIEUxAMJgWkE5KKJUqKCsqGGcghAIg0HQOSoxKLDK43uDNu4+98Nwng06aU5xoZpgZ5vw+OgvG43NnhDZA43yMLjIoDkPTPgy418RAnxTuLNSI3wZ4COxen4EWbI07CsR1yV7XJMByLgjWqfgZ9RJCao4VpLGM89/GKG0AuGH2UtU5qoUKcoUykqkkBlMJiAIxUqXBMnLiINQ3OEIfJwn6BHAXwC9EY7eiWAwflk6SpDb0JD7s2f0mMplM6HKCIGD1+Re09EyXr1iJVCqFctnfg8oL5XIZb7y+fco8XlvFsnOW4+JLLsXGDS+HLrt7167I7X7w+hsw69TwuSajQopAvFdCPuOpRrkSrX/tfI+93qGr3/d+bN64AUePvhO6vocf+hFWnbs6cnjjXoZTzucwDGDCzktA/OPBS3FoO
HJQM/cmh844GNdQVhRbgBYEAUTXIMsyZFmGJMughEIWOXTGoDMGBpMghkEWg9dCAxKDuQvsixfc6yXDIprVe9Fyg+DzC8fsWa9rEeL2Wo5zId1KmGsLLB9MSNGZMxrqDfSCrAYY1xgsfWB7reA3F6zE504/xyZjx5UKhkQZg0+9DHbaPNDr3tO8EgchPOuDVwGMgx9+B9Udu6G+sRvpPYfA8gUcL5dwZnoISSoAIT1pB0UJb5ULOCM1CJUxHKuWMCIlMChKkKmAQdF41yyPXTcoIZBaaLOka9A4wylyEoQQVHQNnHNUmR7oFZugArK8CpUxSBG8ZyVKMSeRaql8hTPwRWdgYPUK3H725/CpcxZDjhC1gr87Cvadf6sbZ80I8pbqhb+DkVCpglPaYPTphyAPYDQxkJgM6izV21h3u2GRwED3Kxh7hXTslesAeovQjus6Wg2haIE6ZADn0SADuVqdxmyraRooNQjfSlVBuVKFJFAMDQxAkoxoGIMDSaTTKVSqCorlimHsRI3oG4xxnznZbf5n9cV9Fb0zpoPQ7fopN3ppLjqZ4Vye2Q5509OVPkKiTwp3ICYxzVvvIrH/1/nvotPIwx5y3PGdt518m0c6aWbUb0eg4IbOinPD8NyLNK79ML9TnIMTDkIllCsKKuUKBIEgnUphICFDFCw5y9HpPk5K9AngLoGnh1OXCtph8/8C6CjvVS9sikBiAsDylSsxaIb7bebdJcsyzjv/gkihpje+vB5r1q6L1Ecn4ha2PvbxT+CN17dHMgqIgnnz5+O97792StqyEGXsFouFNvQkPhTy0fqXSEwuf6IfvG1+TU/z2+/A3//d34aus1Qs4pGHf4xf/vSvTrZ7PQf33S4U8pG+R7LcpvHg6As1lYDOfZYwbU1nmq4b4XAVBSVFAQEBoQSSQCEIAkRRhCAIkCi1w0Azxuy8wgzMZnXsOdKtOOKoCznqFT7Q71p0GIJ93WLJy7LWY/HhFZ45quzQTBlmece4SVvrt57znzdIMngs2MpXo0JjfVTxJ7iCSLawOFYt4YzkICghGJZkZFUFs+Qk9DsfAU6ZCXrR6nAVUoMQTi48Hckb3gswhsT+wzjl9V2o7jqA9P4jODYxDp1zzE8OQGE6SrqGlCAiQQUoTMfX3zQijnxr2cVICSLGlIoZTgpQOMP85ID9O+1D+johUwGz5OYE8JAoIacZhGhBU6FzhjnJNBjn0BhDmWl2nmI35iSipQ5w9lFlHj7GlIKceRrYiiUQV56N9DkLQdNGW5FjaeSLYH/zLw15dFXGQImRZzIqSMDo5IUiRJFC10lrIfWDCGAAyBWA4faklLDnEeKbVTF8nTHVE7bNXlKF9Arp2CvolecRNxnWNJSzlxcMaoZ9fopVr3KiKJjyCQNAQAiFxjgm8nkkJAnJRAKyKIASIJWUISckqIqOUqUMTbeIX+rRFupkSa/+uPd7XX+3o1v1Ua2i7x3cO7AeZR13E4aIqi3p+pgmOElhZ9qA/jvZZtQN+vBG7kDtmXFHfdwZjtj8X6c9yYb+kBoZbC9LSeMYdOtgggzfHCb0sFKccXCD3+Xm7fIzeuOOSCXQQYgRwU7jHPliEcVSGbIsIZ1IQJQEiLReJ9XHyYU+AdxlaCVUY6ejWAhPHrWTAI4SXsoqRwhBtVLBtldfjdT2Wkf45yClunVszbqLIhHAO3e8gXwuh6Hh4dBl22l0MDg0hI987OO4567/jLVeP9x6+y9DFKd22pMjkJ5R3pGpRFSCul3vcdC4XLxkKS67/Eqsf+mF0PVufHk9Lrn0Miw9e9lkutcd8HZkaGmRW4g4XuMeD63miDMEZaeCjsOwu6yZpnJOUNEYoDGQihG6nHAOSSKQRBGiJEEQjfDRBASccejcJIXBHc2YZDM3FjjOhY5fKCCvhUK90G+RSLVDBLVFsPvanb/DCvthlb5u0rcOAR67JJUAY6xuMQ8vQtvolOF5Xqm6q3HW2FJ/W8FMqTaHi4RilhnGeLxaRvIff4ihP/0y6NKF0RugFImlC5Gw6mAM8w6+DX3HHtCd+yHs3AtSrBlZbJo4hgcPvwkA+P1F5yMliIZHrlmdwnRIhEAgrXsR65yhpPuTtxYIIZhh3g/LqxiAnadY0RiqTPcNX51RqxgW5cacxi1CZToyuoI5Zy8FWbkUZMVS4JxFIOkUrBYn++R5VQH7zr+BHxttOJbXFCN/pBTdeCXIOZ2bYZsppSDEIIHrDCtcEzVJB1PcvFQBaRMBXGukdkFuMtVrzunElYvtCRwjmT2daHWu7+R1ZDeTZV5ROJy/uw1eBGuc1+KUF2qhDb0N1vy8bALlP+NAHXHLAAhEQFXVUVUKEEURA0kZsixBpBRikiKREFFRNFTKFah6fXSWmjLWIe8BdX13wx3ZxH0NfmWme9x4hbE82eD3Trsx3c+qj9bgRUS5YZFRJ99o7x64dZTOt6//LoaFx0hv8Bb1+U557PNKCeRe+9XmVUc9fu/jVD9OT70YA1wpgKg5RzhGov2Xv+cur5MZrAv3+r4KhIKZLsfcsy6rSlJTA1ld5BycEOiMoFRRUaooEClBOplAOilDEAQI1LmMazSs8zLsdx7vo/vQJ4C7GN340mmaFsnbs11hWP08B8PgtW2voloNH2pPlmWsPv/8ps/Refyc5SswMDgYmiBkjGHz5o147/uuCd3PduPSy6/AhpfXY/++vW1vZ8nSs9vahhcGBsKHjJwqj+ioiNq/gYH2KKKbvUM3fuwmbN++DYV8cA5SL9x3z134oz/+0yk3HOgm5HPhozoA0d6NIHiNg1b2Wd649bt5TcQn9olQdUDRVKCimuQvIFAKURQhiyIkUQIRKAjn0HUdzPQ60Ti3iVrikNAZqymVjH7UlIy+49qlkOAwyOko1pxhFHmtEgq+CsKgtjyUapwxO8y2F1gQody0p63DjxSdJSeRr1aNMMF//hXQebPjaZBS0MULQBcvAD78PsjjGcz42rcA0xPp8lPm43cXX4DTf+kynH75VY39gmH1CwLQFu8E5xxU10DF5mHZS7qGCtNxigcJOhuGh71uPkfJ5Sk7wHSonPuGoq4DY2APP1m3Ky1KSN50A4Sbr29ePgo4B//+XeB7/3/23jvskuK+8/1WdTrnvHHeyQwTgIEBhjAzDEmgEQKRBBJZCMlBtuWwV17bd9f22ms/tteP99qPZe5a9157WdlIlg2SFsmykEAISWAyEwgiwzCgGdKkN533xE5V948Op7tPd5/U5z1h+sPTzHs6VFd3dVdX1ff3+9X+0M3NeFN3hG3UQIhdXxABpslqQnDAU4XLDcorLkx6Qjji6SD2SbwcqyJwSvIM231fjOsJG2D0njsY2aWujRHjAVwvtgKEcXC7vcVBYJgmFspVCFUVOUVBRpFBOEdOFpGVR6HpJkqVKnRNBygFs719CHfaj40HRqO8gqM8gxql1wuORfEXaP4dGHRjj2OF0HfR84f1VteMUVMGA1/tlL6LCUDg7VFThAu7YY3mZuIkRRo+ha3hi1yWoacKuSriv0uc1+c/zuiL2/11r9OA
s94xUiEgHmcCwAzIzcQaYHKjpNQ227mitoDMOQxGsFDSUCyrkCUBo9kMFFkEpdYUZ1b/M9yIO/2+DTLEXdIR7ZRF5cjhw20dt3Tp0oRzYuFUqLH7hFgTe9fv2b2rrXNvPuNMZDKtBSgUBAFbtm7Dk48/1vL59uzc2ZcCMCEEt972Wfz1X/5FcyEP22BkdBTX3XBTV9JuxLJlrYsChmFgZnp6UecqboXDhw61ddzSZd15jxsxMjKC6264CXf/8z+1fOyRw4fx8E9+hCuu+njyGesnOmjLtfM8jI2Nt+Ud30vCPAA4ITAYg6qqqKgaCCEQ7O+KKAqQRAGiKECUFctr155LjnMGbzTbmpcf6s4RHFwMzRs8AoaNt5sS7FB41ycd3i5qnptYg6uAUBjnReMS4wEc9ETshLyuQREEa27eAGOiDBRKKPzV32Psz/8zaBe8LcnUJMi5Z4PvfN711L3+uJOwdvl60BuvDD2mVd96CmCi4V4Wo/YSl1ZU56KVFg/f+XzdupJAMH5VveidFOyue8H2vBS5PZOAABz3WPOy35iQEEAQiCsEh5LNRM73y6vVRREzrXELXmfdP4iDBPwYEYEHScgZlkGnfhLyOqUb19JKlLOgYNzq8+w1LvOe12QcC6UyKlUViqK4A6OKJEAWR2AYJsqqClUzwDkHowSEAd52WjcN8VIGgzRs9ADjGtl63RI9m9NiHAicYvON3Tr/poVYT1jM5gARvZBFgNj9jOCaPixLu//gjN84T52/fePdvba9kQGbs060pyOzzuCMsfj3iTqWUFhlzQHNMDCzUAElHLIkIJeRoCgSBLs9w+xpj8DrPZ9TBpdUAE5ZVF579ZW2jlsy1T3hqJUPR3Df+fk5vPH6a22dd83atXjv3XdbP27N8W2d791338HBDz7A6uOOi9ynk850J6w+7jhccunH8NCPH+xK+tffeHPi3obNsqxNEXffvjf7VgB+6829LR8zOjYWafDgDf3WLc47/wLs3vkU3tzbet5/9MMHsPWc7Vi+fEUXctZ7Or3rr7/2asvHTHXJqKebhHmmcM6tuVZdwcCasxcATENHVdcAMIATy0tYFqGIMkRRggjrmXfnEmYMzDPHsDMHjGUZ2iBcYMhnzN9RsztKnh6UKzaHHBwccHfDMze4N7Hf07j3m/o7Ps0M+LNqjHcjSU4CZuDQmBkqADuMTOeh3v4PyPzXL4AoyYe6p1ftgLnzeRyqlrE6MwJdNIGX3oD5wWEIx61M/Hz9gPlDy9CN2cLiEbWM0Ss+AjLS2VzCUbAHHwN7MN64rmp7P092EgI69I2z81CuhBp/WCIwhWGade8iycjgEQJwnJFEN6gz/ljUsyfHsAiOQLhQN6iC0zAIqMP2bAHduZY4j9qw8zrGJ+6gf4P0gyGn3WM4BxVEGJxDLZUhqgS5TAYZSQQlFJIkYkISYZgchXIZmm7Ue+y08Jw2G1klzjs6eB1JMKh1RD+TisLDQUTwgZQBICgKe4vumH4Xgx6jjXbtg1vlBPwOE4WB3pVn0MjA+n4T3z2LapPHRf/wibmW2687c7IVzwSWo3RENBHHycDablpKMBHATQZGCKq6AVXTQAjFaFZBNpOBIFoRUhgzbc/gcGeClMEiFYCPQZIIe9wur7/WugA8MTHR1TmAO+GZ3bvb7iTdd+93cd+93004R/Hs3vV0rCds2EdmsSr5qz9+DZ5/7hnMzswkmu7Gk0/B+RdcmGiarbC0DQ9gwBJZe5nvKDRNwzvvHGj5OK8Xf68GFm659TP467/8CxiG0dJxuq7jW9/8Bv6P//jbXcrZ4KLrOt7c+0bLx7VrGNFP1Hmp2s81s20yRUIBQizDSQIYpgmzylDhdphXAFQQIEmStWRkCLDEVtM0YRiG5SnsmZ84ziK0GbwGvjzwF/H8P8zOs5E3WcN8xL32sbpxRMcoRtxK8ou1pEmxT3rrXfA77gb5rc8lPiJENq4HNq7Hmn1W3ZsRREyrZUz+4BHkPn9roufqB/i+A8C+A5jRqmCcY7mSxYrMCIRrL+vK+cq7nod8171N7auaJtA4UnYksbYJnmfaEhJqmygllrGJScGZ5+3NZABEhOGPM5LoAl5P4EGXD4YltDWQDtb0I2mZNE8znjHBfQnqPWIaCXBOvcXs8CyUUnBOUK5qKJYrUGQFuawCiVKIhGPJWA4m5yiWVWi6bg/0JnPNYcROsZHweVIWh2CZpnVC73EjKtkQxPcpwl6XtBgHA2/RxfWrh7o4m1R0B+Gr4C9P6z3sddk54ZmdzFnPmWuB3/BY77fBfUbrfAAIBFgaj9cILqTF46mbrD0YTAhUBOcAMykooVioqFgoVSEKBLlcBqMjWfjC1qUMNHVjfGmj79jAsSZdTNRqFW/ta32e17XrNySfGQ/OvfBaA4ctYezZvbOreUuaZ3bvcju2UdfZ0NOsS8iKgltuvS3RNEVRxK23fSbRNFtl5apVkNowYGjHU3Ux+Nnbb7UVqnvtuvU9qXe8rFy1Cpd+7Iq2jn3j9dfwzJ7dCeeo+4Q3AGvrOy2Nt9/aB11rXWRYu359XT6Cy2LiiLmtLAA8IXgATgg4sUQaAVboHDjees5inQwgBAwEJmOoVKtYKBQwOzeP2fk8iuUydJOBiiIkRUFGlqHIMiRRBAWxvXgtcYhzgLFgHR42IFELFFRb/MMazh4cHIx7Fm9peKymg+9yw2+mEBM+iNd/i71p1u3OOXhMPZRkNWNyDr2Jjo/KTBR3Pg/WpJDYKvzKiwEAZUOHykwoVID89PPgpXJXztdLyvc/DMCac3e5YkWOIGdtAlmdfBQGtm8/cMfXoZkGWIMHJyOIWJnp0AOZRJ+Da7rfu9fX/uKWB5ogQBRsoxDOQeRoW17eorFTUrAef+uTwhG0B5mwvk3fEpK1YLtgWJ4tYPCfLYfFeK7CpoUItsfq1oekEXcs547w623jMStKC6GoajrmFwoolMvQmdXWEgjBeC6DyfEcRnMKBE87h3GrncXtRhkBAs94axHIvJFWwvrmYW2wqPZTo+NSFo+wNm9aFr2FeJZ28PbHUgaPyDFS9GaMIjFaHGyp262Tl2JR8L+53DOW4YyNhL+TXR6J4twdBrLaF9zqCzp6rhtJLr6d4377I9oAhBBQSq3FbgORkHRgryccEKkIbo8jEYGDE9PKqEBhcGChWMGhI/OYXSiiXNVgOso6gu2IwL0d+JdleEmDeR+DtCJwJsnLL7/UsucdAJx44oldyI2fdu7De+++iw/ef7+LuUqe+fn5trz1FovNZ5yJLVu3JZbeZZdfgZWrVieWXjsIgoD1bRgxzMxM4+AHHySfoQ55+aUX2zruhBNPSjgn7XH5lVe1NS8zAHz3X7+NSqWScI66S7fb6i88/1xbx51wgvU8kJilHdoRcjsxciGeBnYw367wG3FNNT24lgeTc1Q1HcVyGflCEflCAQsLeVTLZXDGIEkiMoqMjCxBEUVIlEIA6hrYBAScO4szCEEAboI4gZM4A+fhIion1sLg7Tz5/wXqB34ZZ3WLG74wxhCG63q9sO6xenXWOeG
yCSGg2UxkeknO4Klzhhk9IsQuLEHC5BxzugqZUrAHH4P+w0cTO7+DdN5WkNUrUDR1yFTAhKQAqgazQdjiQcOcnYP07MsAgKxnzl16xY7Ez8UPTYPdfidk3YRMBZRMHVUzup3KOY/d3hzRzyYVRYBHD/YTWB03gRCIArXWxxhCxL1zi8EwDGA7IvCgXstAeZQ1mdVhEOYdhuU6gMW5lqg2W5QQTO0lahDUf6w3vGFNCCbEMrxzauGKamC+WMJCqQxNNwECSFTASFbB1PgoRnNZEM5AwMDtsImO8YL/GW/ufrX7Djv1ltNuCvMyHeS67VggFYQXn2T6o976xFn8RrqcZb8IAAAgAElEQVRpcQ4OrpblvIcDLQq3+IR3OjDTU2qZr5VTbSyj9i4mORIVkouotolHiXPPGOgDBo8L6x9GtX8EQi3HgZA2j9XIIeB244bYArR1fm6J1CAAoWAAKhrDTL6AI9PzyC+UoGrWlETO4t5d973g4IQ3M7V0yiJTJwAPVEcxZWBgjOHBB+5v69gNJ3RXAG63Qb1r51MJ52Rx2L2rv72Wb7zlVihK+/PrOSxbthyXX3l1AjnqnBPaNGJ45N8fSjgnnVGpVLDzqSfbOvaELr/HzSLLMm75dHue5oXCAn54/30J56gL8Pqf3bBpnJudxe6dT7d8nCiKWLN2bd36ODG420JuP+G9Hs4BE9bcLAulMmbzC5iZzyNfLKCqqSCEQFEUjGQzyMoKREJBOQcFB+EMVvxpCoDCZAAhlqDmDHA2e8s4rDmFGQATHCa4z1vSHWj07Ocsbmcgbm5co17E4qgf9PKKwSQT/Z3gMV6WrSITilHBH/P36dmDWP/gV/GFVx6HzhkoIVil5Kyw3wCMf/ku2J6XEssDAECgIFftwHI5i4wgYE6zRGny4BNg6uKG+u0m5P5HQZm//OhJ60HO2pTsiQolsC9+GSiU3FVjooxSnAAM4IjamRFQbJNTFiMtc91n3/oBQRAgimLou1NLr4NY1Sku3sG/lP7AFdSGgGF6rhbrWppt77kGaCEewXFpBsVSYg+QumkRClU3kC+VMV8ooaqZACgoAUYyMpYtGcPEaBYKIbV524nQMyEvLBLAMD13xwppuQ0yQVEpLctBxTemMiTvZBf9X/sMRxD2iMLcGi7hNR1z0W4AJfCNxzgiMBDfzml23Mvb9mnGgM4nFFPuEYMBQkUwUBQqGo7OF3B0roBiRYXu6Yf63gXn5qb0FXQYKqx+JzKMRIQXbiv7tgMl1A0PsFgD58/s3oVDBw+2fJyiKFi/4YQu5KhGWIUXdr8dCxfOOUzTxHPPPNPVfHWLF55/HtU+9mKcnJzEtZ+8vuN0PnXbZ/pm7uhTNp3a1nHP7N6FwkLE3H494Oknn4CqRs+7GcXU0qVY2kdzvp562unYes72to6dnj6acG66gKc67+YX/oc/uK+tqA4nbTwZsiS5nhmuh8aQC7vNEtoeQK2BzgmBzoCyZmCuVMZMfgGz83kUSiWYnEOQRIiShIwiQpEFiJRDAIcIDjACzgQAFIwRMEaba0+EGMRyUhODnSWszBixFp6J8QA2TI+IbAvMqE/f912OSY8kaHJKCcGo6BfS3qsUAQDPzB2CQgXfkM6MVkXV1MHuuNsKL5wg5OLt0LMKOAeKpg4AVgjoJwazPRKEl8pgj9QbqZGrdzRvrdDMeVQN5u13gh+ertu2VLY8y+f1+m8dJQQTUmftChJjnEAk2Wqfe56oOmtvStz3kFIARkx48jiji0ViGAbHHI8P9++UxGk3AN+wlMewXAeweO98nDdvWAQFNPCeiWpr1rY587U6kUgoOAh0g2GhVMJcvoCKqlreMIQgI8uYnBjFkvFRSJIIAtMd6F3sewT0LgJcSvKk5TgM2EKUxyM49QweXJzoWN6lJ++lE8IrbGnF23dgvX7bwHOLXON1eN7HLgvCXmcH57dXBG6ufRK9L4U1NVlwoRyh7afwzDGAmOCEgVPAJECVEcyXqjg6X8R0voxiRYPJ7IM47Mgpx8pDNDhQoHkLgpTOiepoNOqQDPKguGEYeKBNr7nNZ54FSeq+B0Mr95JzjtdefQULC/ku5qh7qGoVL77w015nI5YPf+QSrF23vu3jt20/F6eednqCOeqMjSefgtzISMvH6bqOJx5/tAs5ah3TNPHoIw+3dezZW7YmnJvOueGmm5HNZnudja6QZFs1rC9ACcHRI4exp81oAs2GeR9+K9Rw6toHlNjalxVShxKr8Ua45eXLCIcJAs0wUSxXMJcvYDa/gNmFIkoVDSACFEVBNpuFLFGIAocA2OGHvKGig2GjW7/zcUZrcR67qKpu5znsrK44bA+8AgBVokNAh6fSPofUsi/FW9acjDu3Xoa/P2OHLyQw5xwypRgTZUDVwG6/E/xQvcjYLkSRoX/0PIiU4rhM7ZvCf/jYUIwa8YefBgLezGRqEuTcsxM8CQe/427wBuK8QgUs6PWe1RNSZxFK4oqJSpaHvlXXhoTsCvkbhh6ZHlEW1wN4WAehnXBpw3l1vaeT+5p6AqcA4WNZceMlUf1+r3F86DHE2yYDuG0cTgiFwYCFYhVzxTIqGoPJrNFjRaJYMprF1PgoZEmA88QvRmmHGRTG7ZcKioNLWoaDT63s6kVhqziPJVVu8KkThHudIQ+tasPHGt4xqLqIZknUrWEDXISDUIBQ28A35IlppCU1oxs5v6PaO2HHUkrhPCSUAAIBKLHGPxgHqpqOuUIZh2bnMb1QhGYyy3C/r576FCD1AG7JO7fVxaFRB6QZ4XeQefCB+zEz094A6NZt5yScm3raaTDv2b2r6/nqJv0eBppSiltv+0xbz342m8WNN9/ShVy1jyAIOOvsLW0d+/BPfozpo931Om2mPnvoxz/C3OxsW+n3owA8MTGJaz5xXa+zsWiECblNLSHfI9M0cc83vm7P+dEalNKG78KxKPqG4X6LAt8kZg8qWpqEZcHJvXPj2g16xghUTUe+UMT0fB7Tc/NYKJehmQxEpFAUCdmMhIwsQKTEChnNGAjn8FqmM9sMlnBm7eOzj22BBmJUMylaHTEOkzOwOA/ghNtNnFvn9HLZ8rU4e3w5RELdbYQQS/x1CAkz3CljV18KCBTeKMn84BGYL76e2Dl6ATdNHPn+j+vWk8svAoSowMitw+66t6nw3FlBxIgowmAMhqeuO6KWfb9bJe7ZJCO5moEDLKtt6lhRc0DwhCxz4DHhv4ntAdwPfb1hGZR2BMfBvor+IMlvfc+8bRJmGN4Rh15cR6yXDPzySdy+QHhZBL1ZrIhqjlcwAxUEmCZDoVLB7EIRxYoKxq16WxAETI6OYunEGEYzCgTOwZgndCJjoNxEp29FVF3b6phSKigOPmn5DRdpWQ42QS/hjttAIWFT4mr4Y9WwPil8orA9PtLNd9H6ZrsjMj7PYO8+9bO6cgQ/9VH5pCCWhzChllew5wny6VmsFuXNcY8mACjlllhti9ecAJWqjiNzCzg8W8TsQgkV3bAPYQA37bT9w1scw9X+7Wcs24IBudFJfvRaSacVj9x2RNtm9h
9UUfjZPbvx4AM/aOvYbDaL0/rIi9OhUqngpRdf6HU2OmLvG69jfn6+19mIZd36DfjwRy5p+bhrr7se4+MTyWeoQ7adc25bx6mqin/52lfaEtviaKU+ffedA3jg/u+3dZ4lU1Po9jze7XLRh3dg3fr2Pc37lWaF3Ha/Y9++55t4c+8bbeXtlE2nYmx8vM0rO7aolUFtcb0Cg+tDLTgD4f8AmCZDpVJFPl/AfH4B8/kiqlUdnAOSJEPJKJBlGYog2vMJA9S2OmegMEFhBZQW4HQzw+qR0HolzgMYAC8UG1sgO9dOCJCNCSmdcLt2dWbEnd+3lhVrUFekFLOaiqKu4UC5PmQ/PzxthRtOaJ5eanvEHqyW/J5vP3o8kfR7xp4XMVm05jXO66rlWa3IIJdemNgp2IOPgT34WNP7C4RCpBSH1LJ7rzm3QpO3TcyzScdyvvfYDY0Pv2jhe7cr0dMycFFyDUMWg2bPMwyd/GG4hmHDGUAaBtLraJ+oNqyvfdvCvnXpwzHctxbvtFouJgNAUFE1TM/lUSxVoRk6GGeghGAkJ2Pp1ATGR0dAiRV1BYTAhOAad7VaxyxGnZQKioNNI6eVlN7Syhhv0Fs4pf9oVJbNOJI1cRbf0vDI1Nu3A4JmZDWjUGuBzzi7qaSaORvxtklC9iM8UHf4x4aC3r7BtpB3rJAST4horzEc9ZzYzhR3ohJ4hmas6HLWepMxlKsaZuYKODyTx0JRharDmlLTltI5Z+CM2ZVYzfkgrdO6R3Im9S3QauUWNbjYSaOl0aC305iPfFm6KAjH3YNBarDt/9nP8PW7/rnt4z908Q7ISmeh9hrRjg3/C88/B11LZiC3V3DO8cye/vdivuYT12Fionkxd/2GDbjo4h1dzFH7bDr1VKxctaqtY3/29tttGVIk0bDUVBX//E9fgWmajXcO4cM7LulbgxXL0/yzdliTwaab3x8vjzz8EJ58vHkBJcgll17m+x1ivJrSRaxQhQRUEMC4NXddRdVQKJUwv7CA+XwexVIBuqFDlERkcxkosgSBCqCEghIKMA5m1ubs9XrLOEQ+c3K0FzArlluqp2gmOgR00nWOxkyYIfmZ0SqomgaWK1lQQrBCyYUez/ftB7/j7sR6NPSqHRgPzEXLX3wD7K0DiaTfC7QfPAqRWnNSZ6mIBUMDuXg7yEj4PW05/d0vgN11b1vHHp8dhck5VGZiZSYHhQrtZyTm0RTGx3z1tusN7BEXvHMq8YVi7KloRgbB8IhJ/QR3vDd46g0cR9g3vlvfese7Zlie90Ho6zdDL68jTuClIeu8+0YKwLHr7bRtr2BCKAgVUFI1zC+UkC9aUVg4CEA4coqIpZOjGB8dgShQazAU1viEk17cvev1mNAgjUulRDNoY4wpIUJUQBBOy3JwCGsfsbp3MmSnZtIknt37czhuwKl/Fx0huOYl7PF47aAR7LWBd5sgtjeu857HjfvFGbv5r8ha53gFOwbwvjaSR1SuPxFAKAexJhkGB4FqmJgvVXB4bgFHZvMoqxpMxsG57eLMODyzL4cK3SnJ0PNRb+dh7XbDYzHF2lbzMIy8c2A//uGOv4euR89NFgelFDsu+WjCuUqGQRBOm6Hd+TsXk2w2ixtvubWpfSmluPUzP9e3Yh4hpKNn+sEH7sezz+xJMEeN0XUd//K1r+LI4cNtHS/JMi740EUJ5ypZjl+7Djs+0pu6JsoSr1/Z+fRT+O53vt328StXrcJpp29OMEcpreLUj8wTMtpqnNdCGmkGQ75UxvTcPI7OzmO+WIKqqaDgyMoSchkFGckShQkhYPYceJxz998onJC0YfBSfZjk2LZgTFqAJZQlRdHQUTbr2zPe+WBzooSqaUSGB2Z7XmpbgAxCNq5H9oxNoISgbBqY1iowmAn+QPvGGb1EfWUvDr/ymvU3M3FILSMjSKBXJWPQZbz5M8x86U5U7fly87qKOc3yNtaYiXldhWqH4iwaOgqGZeRnMIaioUNnDBKl2Fecx3mP3YOrd93XQW6ia3kyZondzfQNCCHgxXLsPuJIBoT271dlGAYmOeAbfEmp0cs7kpZHSpAo0bbZfYPro/epbXfaRQIVQakEkxHkC2XMzhehanZ7iZvIyRRT46NYOjGKrCyCcA7OzMjnuF+FnX7NV0rrpGU5aES5FPZvGzAlHp9e6PP2rhcdg8f4QweF7pqySNgaLThvexItH05x0qAY7N0nph/ZsI/pdfYljhhMfMJh048TYSCUQxAIQDk45TAYwdxCGYem53F0dgHFigHDFostnLuU0g16rpQ0O8jRiljbrtDaz42c8Ovpz/w+/tgj+Nvbv4hCoT4UYrOce975WLJkSYK5SobZmRm8uXdvr7ORCAc/+ADvvfdur7PRkK3bzsHpm89ouN9HPnopjj9+7SLkqH3OO+8CjIyOtnUsYwz//NU78fhjj8R69iZlRKOqVXz5f/4dXnzhp22ncd75F2BkZKSjfCwGV1/7CUxOTnb1HINs/KNpGr5x97/g6//ytY5CkX/siqt8YmN/fsGGG6+FqBt6B6brteI8l7V5hBlMw0BF1WxROI+5hSIqugHCAVkQkZUVZGUFsiCCUhEAhRXNh9iLp07KRkf14IVKXV7j6jW6aln8tSb4hI2KEuQQr8+sICIjiCgYGvK6iiVyBgVD84dm9tBqCOI46LWXQmcMIiHICiIoCPieF8Bm+3t6hzDEHzyC47LWtyIjiFiXG8PE+VtBVq/oOG1+aBrG7f+IjMEg2FbMoh3aGbAtnT37m5y53t4mOKqm4c7xfLBawuFKCa/OH20rL41qfeqJeBLmjUYptTrhTojQBnNLk4wCSRQhCh14LHeZfu57xeH9hjtXMKjXMiy4nvMYjvZF0Eh+0OmH6wi2vwnq2+fBfYNRGYLRTuLa9ZQSCJSCcxPgJkzTACiFwRkKpSJmClVUNAaDWR4vkihgYiSLybERZDJynVFzP9zDRnSrX5rSG9JyHFRqql/qFTyI1Id25sRaGLi7OOucJSj2piXde7xl5/XwNhn3eQo3pl4+dsZvBEosQRjcMoDn8e2TqIgnwfVOfgkIqDNPMPH3nOPGOAmhbp0jEAIB1viMdU8INMYxVyjhyHQBR+dLKKsaGLPGj1K6g+ht0IYRty2OOCvLZiwwG52z1cHzZq5hkAbkAeuFSoqw0I3+geLGqGoV37j7LjzXoZeiLCu45pPXd5RGt9gzJN6/Drt3Po3jb+5v0RQAbrn1Ntz7b/8auV0QRXz82k8sYo7aQ8lkcPXHr8W37/lmW8dzzvGtb34DxUIBV159TdfqrGKhgP/1P/8/vHOg/XCisqzgqo9fk2CuugMhBNlsFjd96tO488t39Do7fcfRo0fw1X/4csfGIsevXYft552fUK5SkqBWf9SHcA7uU4s0xGGYBgzTQIVzd/BUlmXIsoyMVPMKNgzD8n6B3fgHQCcnYR6ZDc0Pn/cbjTXTLiRTE+Cz+YgLDF/dDmHiL2B14KY1S7heLmcBAEvkjC0YhmeA3XUvMLUE9NwzO8pTdstmlFYvh3J4BjLs/JkM+PETw
K3XdpT2YsI+OIwje36K5bI/pDf9xGURR7RAoQT2xS+jPJeHzhnG7MH0EbEWilyiFBO0Zpjg9epWqAAqKSibOjKCiIumVuOvTv8QzpqINz6Iglu96Ejoiinfb5/IGAjxRTkHmS9EpkVWL3ePFwQCQigMk7lpEUKsDPVB38cbSn5QcWunIbiWdujHQUbHEMf5Tg0ag5jnZuiH99071hHMiXcbR3w5BMfJQvclcOf5pYSAMxPOzHucMRRKFZRAkMkqyCkSKOWQZRGyLIGZJqqajoqqwzBNgAMcFCD1+e4X4u5XsB3X6+cgpT3i+gsp/Yy3TRmxRx87GR3LRL1fcRoOkDr+9gOhZRBSnl7jcVdi9e0Wrv0Qbov/sMudwLb4IK43OLPDKhNfHRCu9fjCSQfWu+vs/1ntJG7J0p62nbdtVC8+O1mshXtmnKCq6qiqOiilkCQR4yMZyAK1BWdmXwu3qyfqGnyGVWbp9yga0fmjHaG3Vzc2LdDuEXZvG3XUKpUKnnjsEfz7ww+hWIgejGqWy6+8quveeO3yzO72BOD1Gzbg4g9/JOHc1Hjs0X/Hu++80/Jxz+7Zg+tvvLlvwyY7LF22DL/8q7/e62wkwocu/jAe/feHcfTokbbTeOD++/Daq6/ixps/hfUbNiSWN8YYnnricdx/3/dQDgnH2gqXfuxyjI83P39zt2n03Th7y1ZsPuNMvPLyS4uUo/5mZnoaD/34R9i186m2Q/l7ufGWT/V9PXMsEPWNbzctR5Sqqio0TUMJlvcwFSgkUYIkSVAESwA2mQl15VKYe98OTc88OgORR88pHOpds3o5zAgBmNgW00mgMRNFQ8dUUKQkBDpjWKnkfPmjIHinXMC63Fh9YpyD3XE3sOQ3QDduaD9ThCD78Y+CfdUflp0/vBP8+itiw233E/zBx2GwwBzzG9eDbFzfWbqqBnb7neCHp6EIAihrre/ghH2e06sYF617mRUl3LZ2U9t5IiR6WI0ubS3qDSEE5sxc5HZhuV9MppRAhGB59XPeFyKMl37LT7s4gxHDcC3NMAjDxA3sLgaGYXlH+onY8S/HSCaGZh0ogmXnbOKcg1KrjVSuVFGpVKDIIjKZDGRRBBUEZDMUGVmGqhuoVFVoug6AWhF1bJEahPQ+rF8bpILw8JCKwsNBTbTpcUZSOqKhYVJKH+JvI/CALYZTjMHSDHOWrQnBlvG+Oy0W8e8TV2+HfZ/Dv9m2Zy/hrogd51wYXA/YeSTWPowZUDUDR9UKZFGEIknIZhTIogAQAq+NCuGuPzH6ILjxQCB6f8R56Ab3aWRt0kyDLm6ftKJaXJrxygb8ZTY/P48nHn0ETzz+KCqVSt2+7bBq9Wp89LKPNb0/D6lkusWB/fvbngv1oot34LwLLkw4RzVM08Q3v35Xy8cVCgt47dVXIkMsp+9hazTzHAqCgBtuuhlfvuPvOzrX/p+9jf/7i3+Fc8+/ANdc+0ksmZpqfFAEnHO88fpr+Ld//RYOHTzYUb4AYOnSZbj0Y82/x51CkExI5Zs/9Wns3fsGdE1LIFeDyXvvvYuHf/JjPPfMno7CPXs5/8IP4aSNJyeSVkqydPrt9L53TqOfMQZumlA1y3BAIIAoihBFEUJM2GY2PQ+A+sZbG73WwuqVMF/ZF54eeCC4b/tQQqDx8PdBoQJoSGildbkxaMwM9x62xUnyp78N0iCUdRzk4u0g9/wAvFSbC5aXyuBPPANy2YfaTnexcPK6OuOfKkDodO5fzsHvuBt8334AVhkpEV7cYRyulpERBExQBSuUnLu+aOjIUMENH90qcc+ksHJp7LFh3zjz6Ez0/sun3M56zRMYIESozSeW4DuSBK1GHupXXIv0IfPAGASxN4jzTDHUezMMIkPzjvTRdQQjLYRuC3gDR+0XNejZaIyL2IOanBNUDQa1UIYkUGSzGcgiBaUEWUWCokjQdR3lqg7dMKwwj5TY7a/e38tOSUXE4SIVoQaX4KuYFl9ShNXV3b25qaHNABJSRK7hmGcXErM/4HjbWmIwt8Vg//ZovSesbRTa1vH8RR111rZ4Zpy7j3y8jmito9S6KsYYQERoJmAYKkpVDaJAIcsicooCSaDWvMK+67GDVafPdyw+D+AomhUHW9meVkT9T7BMisUi9r25F3tffw1vvrk3EaHIiyRJ+Nwv/ypkuTmvlcWev2L3rqfbOk4URZx59paEc+PnrC1b8e17vgnDMFo+ds/uXZECcPAet3vPOfrP2wTwX0/b19bGcZvPPAvnnn8B9uza2dY5vezZtRN7du3E+vUbcObZW3DW2VuwctWqhscZhoG39r2JF1/4KV5+8QXMzyczb6Qoivilz/8qFCUTu18735Vus3TZMlz98Wvwve/+W0/zsZjMTE9j7xuv48039+LNN15HPh8RTrdNlq9YiZtuuTXRNFOSodNvaLAz4Hix+EIHEQKTEBiGCaKbwMR4dHrTs6DUFpB5rXvDubduCJzzuOg5YpMc6hcJxSqPEOhQMnQwzmFwBpnUC4wCIVjQNYxLIe0aOzyx8Ge/DYy1N1c6UWSQSy8A//7DvvX8h48Bl17Y9yM2/OGnAVVD1TTAYc2pTKYmQc49u6N02V33gu2pRXMwGEPR1DEpRc9BXTZ0cFjhoSckGRnBZyMLkzPM6VWszYZ4dTcJiRmjF5bHC8CAv/PMOQc7Gh5O3Zue11MfqHkCm4z53rVef3+99GN7sVW8YckG/VriCHEq6CviBrAGmWF4R4D+uw6fUAtPO8ljPOAan8Nfv3ojlXgN1MPCK9Z05dpHgXOAcWaPnVJohgl1oQBJoBjJZiErEggARZKgSBIMk6FYUaGqqh2NMTrk4iCTjhkOB6m4P9g022VMi7QRJOLvxSN9F/ufZtrW3Luft0hJ+HGOGOxGDokgzqDNWV/Xtua2IE1queaw5v212kg1T2VnvKjuesLOxzm4PY+wbnLoZQ3FkuUZnMtlkVEkiJTACglt5aPf2pX9hth4l+7QyJM4jLQwk+H++74HRYkeBOOcW6EcSyUUCwWUSiWUSsVEwjvHcd2NN+G4NWua3r+dsOXtYhgGnn/u2baOPfW005HL1Q8cJ8nIyAg2nXpaW+FrX3rxBVQqFWSz2S7kzM9ii/b9zE233Ip9b+7F3Gz0AG4rHDiwHwcO7Md93/suJicnMTExifGJCYyPj2NsfAKGoSOfz6OwsICFfB4zM9NQVTWRc3u54eZbsG79hsTTXSwuufRj2LN7Fw5+8EGvs9I0xUIB3/n2PbH7MMagVqtWvV4soFS0/k0qekMYoijiF3/5V6Bk4o0BUhafJOri4MAnY6wuVLNlwUlAnPbb8ujpHcwjsxBAoMhW+8TkHIbJYJqmRwT2D6oKxy2PucZkByLKpoFcQBQcESVIlGJareK4bL2IKxAKRRAwq1XrwkcDAD88DfP2O0H/8DfaD9l8+cXAj54A1FrkAn7wCPhzr4CcE27c1Q9wVQP/8ZMALA/ro2oFa7KjIJdfBAjth3EyvvcTVH/wcJ2AWzTCBWCTM3AOFE0dS+0yCh4LWJ3m4zLtCfUOcT5a
4rrjmkvDM8DPjsaEgF5WCynt9rns35RansCMEJgeEXgYxYOU7mKFnw0MPvU56XhC7xmUuqbZEIn1c9z5oy9Eh5mG9RIxa05fiQgw7Ggj1rEUOqPIV6og5QoysoDRbNYy5KEEE6NZsJwCVTNQKFeTuOSUlK4R5kXW73VASkp36O1zHyri2aTvZO+Ja1uH9SV5yA+PFuvHNmhrNBYU1/4J9QAmxOdh7I0uzDkHIzUD5rA0fb8BAAygdlsK3jaVAM3k0Aol0KLlCZyRJYxkZUhC89G+jlW6LgA3stprpYJJK6NkePrJJ3qdhTou3vERfHjHJaHbmhmo7raw+Nqrr6BULLZ17NZztiecm3C2n3teWwKwrml44fnncMGHLupCrlKiyGaz+MVf/jz+7kv/I5E5Vr3Mz88n5tHbClu3nRP5Hg8Koiji1ts+i7+9/Yu9zkrTlMtlPPLwQ73Ohg9CCD77C5/D2nXre52VFA9JfyubCWto/2H9uyLay5EfPIK5QhGEEIj2nC+SJEAWZXBbpDKYdQ3MiSi0amVM3tq6pEjmtCrkTA4iscRJjZnQGcOIKIV7+NooVIAiR3dI+L794HfcDfJbn2sr03RqEvzi7X6pGCMAACAASURBVOAPPeVbb37/IYj9LAA/8Qz4rPWdkqmAFUoOZCQHcmn702Wwx/aA/+/7kdctMdwRckVK68RbgzEcVEtYqeTc88dhcg6xw4cqrh8jbmjOANI7cGp+cCRyP2H5Ul8nnQQ6/IRwCAIBIRSmyVwDjqAXf6/ohzwkxSAOcjf7paCt7NwHuFkdwDIJMsjvyCANPNcNdjqDmtaPWkj9QPQToHZtlFI3jLSDM6hpHwQAYNwyHOOcgzkRT8BAuBUWsVw1UNVKVhhEWYYsCRAIQU6RkMvIKFeqUHUDmm6Cg4C7BnhW4oQSNw/9ft+jSL3XBpdmpgRstH9Kf1MLmuCP3DS8cEuls+toP/37/Ma9W2nUhd4T17ZupjRc7+BAGt6iDG+HkdC2jH+fwDPhNegBqdvX2V+w9zU9xvzBfbz59yVPuOtXTDix6xkKcMA0gKKholTRIEkSchkRsiRAEgTbK5gBEPxtOeKkwd37eaw854s2U3LwAUpJcdiy7ZyOQ4R2+9lqN1SvJMs486yzEs5NOGeceRZkOdqzO449u3clnJuUZjjhhBNx22d/vtfZSIQVK1bitp8bjms58aSNqUFEh9x4y63Ytv3cXmcjxSbK4nLRWbokfvvBI24UkkKphPl8ATNz81golaEaJmRJQlZRMJJRIFMKcU10CGiL5K55Ss74wkpPq1VI9lywo6LU8PgFXcOcFu6hw/a8BHbXvW3njV61AwumjoLhmb983wHwfQfaTrOrcG6FqfZACYBLLwAZaS9iCn95L/hX7gEhBCszOUiUgnGOI2oZjHO8mJ8GAOR1FQZjYOBYrYyEz9EcQGMmjqjlDtua8c+ieMLxrSWXL/rmfg4iLF9iqQmIN9SglEAUBYii6NunX/psfVFvdUhQIErpPcNUJsNyHf2MUyeGGrrBimLhLAQAZwwEgECpNSeeHZLEl4YjJFMCIlgxG2ubCARCQUF8A6pO6ERN0zGTX8DRuQWoBgO3BzazGQWT46OYGh9FVqIQOK9FYLHnCg4LWT3ocM91RS0pg0dahoNLrdzgLnaQ2KbDSQ8GBOAUgyT+tkr6Hg4P3vfRW5TeNg6lxJ6L15KRowThMMfO4BKGAAIBBCKh7t9BCCFum6l+IwexFwYGTrjbvlE1DXPzCzg6ay2Fsg6TUTD72h3vZ8JNUJigpDYlB2Os+Rs5wHQsADdqYA1T4zIlec7ashW/8LlfhiAIdc9J1EdmsT88pVKpLc9aANh8xpkN50JNCllRcMaZZ7Z17L4392J2ZibhHKU0wznnnodrPvHJXmejIzKZDH7p87+GTKb7YcQXi+tuuAkjo6O9zsZA8skbbsSOSz7a62yk2PRdZ23FVOQm/v5hu/NB3cY/CIFhGCiXy5iZm8NsfgH5YgmaYYKKIoSVy2JOllz7MyuIEDztlNWZnCse5nUVeT0+pP64JGNElKBHdDDYg49BfeCRtvJGVq/AxLYzoTPLZ8gJIckCImu/wF98A/yg33tVBcAvb8/whr/zAdiXvgZumO46gVgD76OChL976wVct+s+/NYbT4FxDpFSyFSASJvrBjHOsUzp8PvGo59FosigjYwjAugH3o/dLqxeYYsR9SG86jvtloeaKAgQHE+1Pqo3+ikv7cID//YbPLAcCwzbNQ/Fe9JndU8YUeNaceujBkp96yipG5lzBmOD6zgHBEEEB0G+UMLMfBFVVYPjfyNJAibGR7BkYhS5rAwCDu4JLV07txUF4lggFTEGn7TsBhtv2QUFqOFiuLWPtC4dHurF4LAIG8HfjfW9qKhwUW0hgVhCMIXlUE844j1zba9gKgCEWmKwO3YkiOCg0AyGfKGMwzMLmJkvoFhRYZh2woQChLrX7ow9HQskfpWp2JvSLDsu+Sh+5Vd/HZIkhVqLeP/t5XP1/HPPwjCMto7duu2chHMTzznbz2v72D17Ui/gXkAIwZVXX4OPXzuYInAul8MXfut3sOb4Fr2X+pyRkRFcf+PNvc7GQCEIAn7uF38Jl11+Za+zkmLTlx2z5dFhoPH+YfdPTgDmWJ7aQjAjFDpjUDUNxXIF+UIR7KR1MSdL7vrndRUlwwrXXzENNzwjYIUaNpq41zIVYHIWKQLTu78HffcLbeWPXrHD9lIGNMZwVK2A73nBDbPcT7Af1QvTIxeeA3GqNREUAPjsPNjf/CNYuYyyoUNjlgh8uFrG+5UicqKEcdkK0U1MhiUhczE3IiOIUJrwFI7NZ0wzVtp0QsvpmTECMD3OCo1OiN2ZRr0HW/1vq/PsLP3Wm+vLuqxFHK9T1uNBs6DwOfh3tjOGaRAzvY7FIdIb2PWgoW64Qcfb1vEQdrE9euuOFyxvXU4ADg7OOCgsj+DawGgtbcfoK1+sYHa+gEKpAt0wwTkgiRRjI1lMTYxhfDQHWRQAztxjGbivLXMskYoYg0vq4T24BMsq6JE43MLwcJK+j8NB0Gvf8dgnhHpEYH/PITY6iqctFJwWo/7k9v6EupFTiHO6wHmiphLgMAHCrHYTHKHXmgpD1U3MLRRxeHoW0/NFFMoqVJ2BgdrhoI+d5zYRAbiRm3dKihdRFHHjzbfglltvi7W0CFpiOL9d645Fot3wz4qi4PTNizv/3qmnn45crr3wic+kYaDbIizcRSuLw1UfvwafvOHGHl5J64yMjuI3f+c/Yf2G1gevB4HzL7gQG08+pdfZGAjGxsbx61/4jzj3/At6nZUUm35tyJLVMWGb3ztU24/U5lNyww/Z4QwBQKDWYCo/7aTI5HiCMpZIKExwVE0DeV2F4PGaUaiApU0KixlBRF5XUTHrDcsIAPK/vgG2b3/L+SNnbXLvbU4QMSJKYIYJI0Rs7SX84BHwF9+oW0+v2tFyWqxSwdH//v+Az86jykwUTd0dyp6SM1iTtaI4/Pza0/DAhZ/EX5zSupGcaod/7pzo91E8pfV
vqL7/vej0Nq5z2xeUWmFEG82zZy0cgmCFgxZF8ZjqEKekpM96Sqs0GgML2+YIwYJtaONNwxrC5HZUBgIqUFBK3LaPYHvKWGGnPeMhHOCC1UapqDpm5hcwny/BNE0QQiAKFCMZBUsnxzAxMQ5RFJCafgQ9EtP7MUjUC4mpADVMDFYREs+/qR6SMlzUDNniHfOi2kP+8NI0dDw8zBhOINRq74TUBe55uDddAOAQKECpYy3HAHBwIgCEgoGiopnIl1QcnZ3H9Ow8ymXVip5yjGiZLQvAwY/rsXKjUpJh5arV+E+//wf46GWXx+4XjC8f5rXg81To0nN49OgR7P/Z220de+ZZZ0O2vU4WC1EUcfaWbW0de+TwYRzYvz/ZDKW0xMcuvxK/8mu/AUVpby7nxeT4tevwu7//hzj++LW9zkpXufW2z/jmRkypZ/OZZ+EP/vhPsOnU03qdlRSbfh78ICdF1xns7Xdr+3GrkRrltchhheYVNp0YfS4gsVGEUVHCuChDIBQrlXpDq4KhweTNzR+zTMkiK4TXK0cX8ih/8cvgh6ZbyyAhIB4RNSeIoITAeOhpmMUkBMxkYN9/qL5MNq4H2bi+pXS4YUL9H1/B6EFr+oqsIGKFknM9dZ35mRnnyOsaVig5jApSy++GapoYFRrP8dwIEjMoJJ8S/QxHob/9TuQ2ZeMG929n3kiRUFAOUB7ega/dF+Z6A0uiaBk69El9MgyDu16vvcSvIibB1Nu3Mc43ZdDv0aC/Iw6Ddh2tOkO4hm2eddSeE8+7nRMCCAJArXnsTHv8zfbNASECKBEgOPMFEwJCBWiMYTpfxNG5BZQrKhiz3HoyIsXS8REsmxjDaEaBSB0DIStMNPdEKPF6BA0jYd/BVEgcDBq9a2lZDj797RlM4Bd8U03ES/r+DSJxBgwk8B42DgHdTHso7vlwtEbB7sMKIZ7Eji2+9duZ0sLxXLb3JxwEhhUu2jPfLyciNJNgplDGBzNzODS3gHyxAlU3wbgdgYXb7aLg8+z8xxl4k2M//ULLAnAj9+uUlDBEUcRll1+B3//DP8LatXHhGtujW8/inl3te8VuO2d7gjlpnq0dnHf3rqcTzElvSMIbt5ecvWUrfve//CFWrlrV66xEcv4FF+J3/vPvYemyuLk3h4OVq1bjssuv6HU2+pKxsXF8+rM/j1/7D1/A6NhYr7OTYtPvnSxyUrTQR6bngIViawmuWgaMxES+SKhuNzlH2dDBEW78qDGGUovTRRxRy3XhoLOCCG1+AeyLXwYKpZbSIxdvB5ma9K2TKirwcH982/nsPPiTz9atJ1d+uPW0vnIPpFffcudhjmJGq0KmFMuULAgh+KBaigzBHca4JCMndi4AxyGd1poAzFUN5s9iPIDtsOhB4wnXwjoQUiv4t/Wbg1ICQawPCd3vdUw/EwyFlui99EaX9SwpLTIkz/ewvKeDdh2N+pNhBm3UV/cG+6UcHAygABHsENHOvgAoOGjgTa+lQcE4kC+VMT2/gPmyCsNkACEQJRGjORlT46MYG8naHsUCIIhumPpaPgarDJIkTMgYtGcyxSItx8GnXhROuizD0iEhS0qrRNWl6fs4vDTjERx05gt6C7vYRswCiCUIw/YO5o4xXFw4aucPyzOYEIBzE4A1HsBMQNNMLJQqODq3gOm5PAplHbrJYHL/9KROXqyqwhGcB4djY6bjIaVfBKtGbDtnO/74T/8c199486J7xHZKu2GRs9ksNp12esK5aY6TTzkF4+PjbR373LPPtD3fcTcYVBG3U1auWo3f+4M/whVXXQ1B6GzewSSZnJzE53/9P+Czv/C5gXuXO+HyK6/GsmXLe52NvkGSZVx+5VX44//257jwoot7nZ0UDwPReVq1DDwbHS6Ze7yAm4Vs3hidXkK35Fvvv4lP/fQneCkf7pm7RFIivXqjWKHkUDZ1X7mNihLGJRn88DTM2+8EU7Wm0yOKDHL5RfUbfvIkuGm2lLduYPzocZh2G+OIWsaMVgVZvQL0vLNbSod950Gwx/Y0te8SSYFIqRtye6WSg86auxcaM7GgN3//I4l5COmKpSDjrRnQGDHevwAgnrwhsk1EqWVF7XgDU+73TvZ3wq0wpKIoQBAsv7R+if40EHVdEzhep0mnGfc7JZ5h8QQGhug9GbDriOuX+oXVGu6cd4E0vIOg7rECAaHxEdOce2aaJigVwIiIUkXDzFwB+UIZhsFACUAJw4giY8WScUyMZCBRBhIUlEGO6YokTJxIRYvhIC3HQacmyjbjmdhceim9IhWEh5Oo8XpvVOFgW6fRsY7xnDM1hreNEn6M0/ayPIcFkYIKBIJgnZcDYISAEwrV4JgvlHDw6ByOzMyjpBowXdtxDkKcxYqaNUikAnBKV5BlGR+66GL8wR/9CX7p87/Wt56CcSH53n5rH2ZmWgzBaHPWlq09CxtLKcXWbe15AZdLJbz26isJ56gxwyzmtossy7j2k9fj9/7gj3DKplN7mhdRFHHJpZfhv/7Jn+Gss7f0NC+9QJZl3PLp23qdjZ4zNjaOq665Fn/y53+Ba6+7AZlMttdZSrEZtI4SOTk6Egh/60Dr6Z0aFwY6mfvyb7Pv4u1qAfcd2g/GORjnPk9SpxPSKhOSAjUgSOZ1FQDA9+0H7ri7JRWbXHohoNQMdBjnMGbmYO5+oeW8JQlTNRx+4CGY9rWMCBImRBnkqh0gLRg6GY/uBvvOg03tqzEThAAlQ4dm32OR0qaF+ryu+Tyz2oXHJCFvbd1Y0Ni7P3KbcNwKEE/5N+NtGtUZdENs2R1zQRAg0P7pOg5avRdHEiJw6vGbLMPyfA3DNQCDex1RIffDjGncPjA8MoRHMKaU1p5LQsAJd/WPsAFTADVDYm5CsCYPhqobmJ7LYzZfgqYz2+CBQ5FFTI2PYenEKEYyitV+YgyMMwyHSUR7NBqXSL3ZhoO0DAcfJ2R9MmGjU2/fXpO+j4NNVJ0ajIQStj5IlAjsLAIh7hQa9cdSEOLvvzr5YdwEIRyCQEAJ4HgFAwAVBBgcmM0XcHBmFkfnCpgvVVHRLM9gbjWRBgoRgO+mO79TUtphwwknYOu27TjvggsxOjra6+w0hRMSz/sOcM6xe+fOttPsVfhn9/zbz8Wjjzzc1rF7du3EmWe15o3TCCcsQ0rrHLdmDX7zt/9PvP3WPjxw/3144/XXFu3ckiThwosuxseuuBKTk0sW7bz9yGmnb8a2c7bjuWef6XVWFhVJknDKqadh2znbsWXbOel8yH3GoHaGyInrwF/cG7qNx4hbkeltOilmozWY2umd+pMNW3DfoZ/hU2tOBoEVEvqIWsaoKGFCUlAydBxWy1ifG7PmTW0BmQp4p1zAupzlCVoyDGQFETIVMPPkHkxNLYHw89c3lRYZyYFcvB38oadgcIaD1RKOz44BDz4OXLit1ctOjieewSqTumrjiCi5eW0W/vJekK9+q6my1BnDUbWCNdlRjImyT5xn4PigUsTabLzn7TI503HbhXj+H0bmnDNaTlN742eR28SIEOthQoODV4jw/h3slAuCAEopDNMEY6
w2stbj9l1w4GAg4RwMcMUf/zbUPULNvAPOPgN8V3pG8Ns60M8W6sXBQWWQ3/WgZ4sj4tbVxdbO1jGovcfck45j7GH9bW0RKPWFRHX39f0GOCfgBCCiBN0wMbdQgiBQZJUMMhkRIqUQBQFjIyJGchlUVB1V1Qodbc11R+yEPHXL4BVH14lrnw/i83usElWOaRkODlGvYngRpuXabwT7KEHSd7F/CSu3sDJ0jNu8x8Qd5wrBnmg9znHOpEUc3N3udqNsD2DO4TOuc+b6raXD3baWc7TJAJUzVHUNhVIVkiAgl5GhyBIkiYI4HsbO2bm3jWa3mYidnq9f1+3nt3YPAFsATul/+rViW7JkCdafcCJOOWUTztqyBRMTk40P6iOi7qthGPjp8/Xz1DXDyOgoTj5lUyfZ6pgNJ5yApUuXteXB/MrLL6FUKmFkZMRdF2eNk7I4nHjSRnzht34H77/3HnbtfArP7NmNYqHQlXMdf/xanHfBhdh+7nnp3K4ebrj5Frz26iuoVCq9zkrXIIRg5apVWL/hBJy2+QycvnkzFCU6XG9KSjvQjRsQFYSXv7kf0A1Aaq6JSggB1q0GMgpQVUP3YQmErd08vhSbx5e6v0VCsCY76nZIDM7AOYfBOTRmQGMmxkS5KQ9SSgjW5cbcTu4KJQtKCBjnKBg6ljz4GNiKKdArdzSVV3rVDlR/8iTAURM59x0Af+0tkNNixPJuwTn4Dx+rW00uvcDnrRqbxDsfgH3pazhcKmBSUhrO/cvBsSpjzQ0tBbxWBUKxXM5CZSaUiHRMzmBwDoV0Ng2DM8gfhXT2aS2nqb30RnR6G+sF4LjOdM2a2i8YWFAgIDUSQiAKAhghME3TerdavoKUOkIE+I6TTCSVYxPfOwO4A0Up/UG/hKJvh0YeLkC4YE9QixRAUO/AYe0bfr7avs4AKQBuzQVMqDVPcFlVUa5WoIgCstkMZEkEJUBOETGSkaAxjmKpDLWqAoSCUqFOWRnkckmaRh7D6X0abNIyHHxqNoxpOfYzjconfRf7n7B+aCNhPyoN374eo1lXQHbTtYzVOOdgxBGEo997X5vKFmqtfzi4L24sByEUusmQL5VBKgQwTYyOjGAkl4Uk2EZ6jLmOho7iyxgDpT14Vu1TEhIQgNMXJ8WBEAJZViArMmRZRiaTwZIlU5hauhRLly7DsuXLsX7DhoETfJvl5ZdebFvkOXvL1r6Yt3Xb9nPx4wcfaPk4wzDw0+eexcU7PtKFXKV0yprjj8eNN38K191wE97c+wbeeP117H3jdbz/3ruWR04bKEoGJ23ciJNP2YTTTt+M49asSTjXw8HExCSu+eR1+Pb//mavs9IWgiBAlmVIklWv50ZymJpaiiVTS7F02VKsWLkK69ZvQDabhnbuZwbV69cLCRGp3G2GCfLWO0CLQiXZthn8qeciNraUVCwaM1E1TYxLlnDptJ0nJAUTkgIAMBhDlVsicEYQcbhqeQqPiFJs2gbnKBkaJkTZNQ5dk7WMsdhd9wJTS0DPPbNhHsnqFZaw+OLrvvXsvocg9EAA5s+9An7wiH+lQEEub24OcWN2DuRv/hG8UsGoIKFiGg0FYALiemK/Uy5gTXbE55mdEcTYd2lGq2JUkIAG52kED/PotJFOPxkko7SUnvHOQfCFaOMvadMJDdNwOrleYcAxOHC2A/WDKkFvYOIRgfuBQfYO9OLcT9d4ZLAvZygIccIeWIZlsHRYriOMqEFRSqwBTW9dHdzXv66+XnRuWTCAg+V9TFA1GNRCCaJAkVEUZBQJBIBEOJaM5sByWVQ1HZWqCsMuA3dAlxIM2HR4PWNYvPKPZaK8EYe5bhpGhi3ix7FI6qk/OHi9fb2E9eHiyq/ZKMaEEDv4mOWNyz1+vcFjfe0ZX5sKtuOuJQrXtgkABxilKJSrKJarEEUBkigiI4vIyjIsDZgD3LT2dyYnszsW3X5Cg11JcuD9Q9wJ6Zi+IL0nLYPFoxuD6IsxMJ8+IylhVCoVHDp4EEePHsaRw4cxNzuHqlqBqqrQVBWEUGQyGSiKgmwuh2XLlmHFylVYvnwFVq5a1RdGCymtk46zHDv0o/DbyffI+N2/BA7PhKf7icsgfOrq1hJ84XUYf/OPkZu9oRQ75ahawZgoIROYS1ZnDCVTx6TkF/WqpgGTc4yIEma0KnRmYqWSc0UF733UmImCrkFlDKsyOXDwmnCpyKB/+BugGzc0zCN/7S2Y//3v/CsJgfDX/wVk9Yo2rrp9jD/7ErDvgD8rF2yF8Js/3/BYXqlA/W9fgvjekYb7OhxVK8gKIkZtwf2IWsYKJVe3X9HQUTUNLFPqjV7Kho5cA8G+EYQjdv7fsV/9NLLXfrSlNCvffwiFf7wnfGMuixXf+Num0wqbj8mdEwlAbV7JmpjgPdbpKDPGat7AfdBG7Yc8JIU3HOwgMawDqqEhugeYYSqXY4Hge+WL6BBYVx8GOi5Nq563wkPXvF7AGQRBQEaWkFVkUIG6bSnOOaqaiWq1Ct0wXU+bY6UsFoP0Xg4XaXkONmn59TfNfn/ScuxPnP5ncPqhZo+NWxf2bHA7PDRrMDpUlza3vIs5Z7Uwz04fmnFb3rXEZpEAiiIjk5EhiQIkSmrjPty0+3etTRvWKpwzaAbDy/vex6G5Qm0O4JTek5bD4tGtgfSwcEydppeS0gzZbBYnnHgiTjjxxF5nJSUlJWH6UfztFHLSevAIAZi/tq/1BM8+FRjJAqXw6B0sPhJvSywPEQwBK9TwfFWtE4C9QvFSOYOqaQCw5hE+WC1hSs5gVJRQtT1bp+QMprUqZrUqMoKIUdHuHKga2O13gvzpb4OsWhabR3LaScDG9X7hlXOwHz4G4ZdubuOq24PvO1An/oIQkKsbh7Pmhgn2pa/Vib8FQ4NChVAvYM45coLo87YOE38BuAKxyVndvM2dir9AvPgLAJkPNz//sUP1+VcitylbWg8nHRYCCwCoIwIDqIWHrrfaJoSAUgpCCHTDaPn83WCYRIhh8TwdljIZFi/zlMHEDmro1s3hYaBJxCCqIwxHpQwQR/x1fGSIEx5aR6FchSJLyOUyyIhWBIisLCCnjMIwGQrlCnRdd4ImpiRAUMRP653BJi3DwSb9/vc37YiFaVn2D816/jqERT0Jro871hJh7eDOxL8t9rxOC4zU2lak1iqzzw1wRmCCoKLqKFdVcM6Qkaw2VFaWIVACcLI4Nr6ea6LpQ58yDDgWI15vhrj9wo5LCqcSSmJJSUkZfhzb+2b38y5h+6SkRJHk96nTbxSJE6v2HQApV1tP8/wt0dsS/qSWbe/RIBty4w2PzQgiCCGQKMW63BiydgSGsmngULUMQgj2zB3CR574Dr78vj+MMwolsC9+GSiUGp5HuKpeZOVPPAOzWG54bFKwsLl/z9oEelJ0GHAH/pV7MP/8S5jT/M+CAIJ5PXy+Z0KIT/zVGUPB0CLPMSpKdeLvoWoptGxbJa5tKZ99KsjEWMtp6i9Gz/+rbG8cHtxLMHyo1/qaUgrCLS9mCoCCgIBZAgHxz9vk7C+JojXfUaCd3QuSbtsvJ
sFcD9p1ROV3kMvEwfEYcP4dFMLaj4N4HV663Z/vJdFtLFInsNa1zTzrvHPNOV4qlDr7Oil60gn+JpYvC+MAoQI0w0Q+X8RMvoBiRbWchAGIAsXEaBZTk2MYzcoQqeU9zLkTMNHxmPGWUaC8hqPoEsN7r7z/NjvmldKfpGU4WISVUVgZpmU5mKTl1180O84U1f6LCwXuTdMnNrsJoa4dEiYy19pQTjuKg9hiMKEchFp+xYQygJhuJC1BkKCaHHMLZRyayePIXBnzRRUVTYfp5NuTB/ea4JjksfoMRtyTwBYQQixPZ8Itf+P0Ye8Nwyr29cvz1MxHeRjvf0pKymDhHXBJIq2U4WcYjIqEs+O9FVmMp2MU9EPbGuyRXPskJ0o4otZ7G+uMwWhxPnZHgJySMzjOnvP37VIeRUPDE3OH6vbnh6dh3n4nTDVa2AQAcu7ZIFOT/pWqBvOhJ1vKX7vw2XnwPS/UradXNPb+rXz7B6g8shMEpM7TNydKWCbXe2EbjOG9StG3rsoMqKYZe66jagVFQwdgzb8qU6EuvHerBEN7B8nsOK/lNPVnXwH0aGFaaWJ+6DCi6glKCARqSb/Wd8rfeQ7uT+z9KaW+dV4Wu4/QL32SVgh7agblOprJ56BcSxiu4QOGRNDG4F5Hv7VpkiTKs8XpMIS16VyZ1Sfk+ut2wTNo6QjEhDpCMXcSACG0VtfDkZ25OwexYXKUqxqOzuVRKJahGyYIKARCkFVkLJ2cwJLJMSiyCApmhUpkZiDfIdeW4tJsuz0VoQYTb3kxxnz/puXYP7Taf07fxcEmFff7i6gxrGDfM8yguZn9KaWg3t+o9XWj+rnBdpXV5w30je12lSUWe6J42PtwAJpholCu4uh8AYdn8pjO+RgtJQAAIABJREFUF1HRDRjM9LQBAXAGa/5h6zyc1+ysvdcaX0/VvJy7G3A65ZhkkDpkg5TXlJSUY4dmvH1Tjh2CosswwHMZkM0nR25nz7YuAJNNJwBBwdN7zpZTjGddrt6DU2cmZvXWvZeD3LTmZPz15ovwf208p25bydTB9+1H6f/9p6h4jhYCBbn8ovrVDz0NmK2J1O1Q/OG/152HrF4Bctam2OPYY3tQ+dYPUDJ0jEuyz6PXweD1QrvKzLrw3DlBxBLZH5I7yDI5A5NbaVFCMCVnYvdvhthnTRKhXFRfro2oPP1c5DZhw/Eg4617FMfRTMc7uJ5SClEUIYqirzPeS3p9/qQYlusAhudahqV9NgzX4DA0z5bnOqLafo3WhxmYhg2M+tOpH1AMes64g5lUQEUzMD2/gNmFIkqqCSoI4LCmxFgyNoJlk2OYGMla4Q7hDU1tzyHfxr1JiScVLPqfKIEhyts0ZTBJy294SMuyt4SLseH7tGJ8TAiBAOIKo0GBtNOxN0IBN6SWP1cAsYRdnXGUVB2H5xZwcHoe0/MLKGu6NYTitJl81+Bvo7XybKYCcBcZJE+cJImznAmGs2l23zhc6w3PErYubPHudyyUTUpKSv+TNi1TwhjGbxM554zIbfyF19pKk14YEwba9mRMkryuuuIhAGQFEdkOvUcBYHVmBJ9aczJOHZ0CAMzaYZA/qJYg2R7DuZ++DnbXvbHpkEsvBBnxz4HLZ+dReXxXx3mMg5XKMB96uj4/V+2IjcfNX94L/pV7MC7JmJIzKBp6qEc1B+o8sEdECUrAW5iCgDYodUIIJux5m2e1asse3PWZC3bU/GQu3AaSbV1kVp9+PnKbsm1zy+l5iRqIjLIsJsQOcRXwRuO8FhJaEATLI5jUQn/2qh77/9l7z3A5ijP9+67qnjxnzpwoJCEJYRGEAIHIWWQw2CYZAwYb44jjOq83eu131xGvw3/XmIyxjRdjMBmRTE4CIYFAZJAEyjpxYoeq90OH6ek0+ZyZUf+45kKnu7q6qqtD1XPX81S3GG7auR61lqtd61Er7dYmtU4gNMrO2qgOjdJO7dEIXmKtHb/JOoQQzcMFFg9hm+BrCLMljxant4y9XJqQa9hhBCgqQzaXx9aRMWRyBSgqA/S8YtEwBvpSSCXiCAsU1PBqAUC6pK3akcCDrbPwE4ODduxsgrbrHgLv4OmlWk2tUhqv6LBWr2BKShYE6yS4WsukjZWhhYcmzBSDCbRlrcwllwgBJwJyEse2sQw2bh/DltEssrkCJEkG4wwAA+dqDd6/5QQCcEDTqTQIqeahqeXBbkW53c7djcb3gICA9iDw9A3YEaEH+IhWkgy8+Kr3fq88K4SBbraRO0IFbCuWPH4JIegRw03Ju2gJAxQiFNukPGZGE2ZIZJUzSPc8DLbMuc6uWZ5EHOS4Qx3bJ29d5u893CgPPYVksTxcMUnEQY480PMQvm4D2K+uB1e0kM15VXGs/2sQoQL6whGzPUekArJ6GGcrm4s5FJl/CGiD/317FX702nJkVWc+tcBBfEXu6AlOr+xKyC+/Dp7xXvc5cqD3ZIpa8PM6MyZLWvdbces3C4IAQRDMsNDTbSiZ7vM3i26sRyfXyQil3Im4Tfro1LpY6YY6AN7hDP0iMfjaYQCbZ7AW8rn8WIDS8s+Y3QtY+39JDDa2MU6RyRcxMp7ByEQGeUkF4wxEF4LTvUn0p3uQiEcgCqVvSpc0V1tjDTkc0LkEAlTn4tZ2QejvzsXPsS2g9VSrG3n1icx/81KfiHMOMG0itybQUk2kBcr6QPZz2se//n0xQwzm4EQFofp2EBDOQYm25AZjQFGSMZrJY8tYBpu2T2Akk0dBhfkd9xOm3QgE4AAA1Xvj1vpSqyTsGrMsqp3BMR3YH+zpLEtAQED3EIi+JTRfMgSh4HY0+tMgu8z23K0+4R3y1pO5s4C5M733k+Z6AUcFETOi5R62o1IBORcxslYmFAk5VcHmYg4hQcBgOAYCIKdqwmpOVTAmF8H+cBvY8pc88yEnHgkI5V3+/u0T4C++1nAZXVEZissedQiy5OSjQCLu4jgfGQP7+VXg+ZJXb1aRERMEiNR9uBITRG2ApPdJjVDRKmdlXm3G/oJ+vRTd2LKlmMOYXASgeXL/5I0VuGnDm3h6xLnucvVw3/uL9vUivHjPmnMtPPG8985EDKFFu9ecZ81wDgo9VBa3DpiNN7hFPEZpME0phaD/rN7AAY3RTtexkXK0Sx0aZSr6c8RyEjdP30bP32390nZ6RhrFy1vF135iX2cX5UZCarVx2I605l86hOueLN5GTwCaBzEhkGUVE5kcto9lkclJUFWtDiKlSMZjGOjtwUAqiZAggjNmqSMF58QpCndJW7YDjdr4AtqLZrVjcA9MP8Hz2B0EbTg92CcrA6W28PQGBgD72r56P0gFBwPX7EeEQCDU9NqtJWKtW5lK0VdQ8gw2BtjWnjih4CBQVI5sTsK2kQls2D6BbeNZZAoyFJWXT0RlhohtHK9lyTgJBOBmUYvX6lRSj4hbjWjrN8OhWi9e39mplWavtvgaV1undmvvgICAgE6EQJuRFrxJdzzIgft47mPPrgKKUs15Cqcd630+oOnWbc45xnUhEQDCVIDEG5/OQAmBQAiGwjFELaGNM4oMhTNE
qICoIAKcg13+R7A333XNh/SnQQ5aXLaNcY7CPX9vuIxuqM+uxNjmzcipCjg0T145JIKcfJRrep7Pg/38ShS3bUdeP4YDuGbD6zj2qduwbPNaTMgSMrqgnFNkbCnmkFdlrMtNYmMhC0W/3mNyEe/ns5D1vwVCzX9zGCEwtX5eUgghrofrTgghfHX+vjhp5nwc2r9TA7Unvi+y+One96Yf+b97h+yOHnVwXXm64TVR09xvTYPy62kVB+x5GWsDW0NrAdNjbAwMMu2Hn2Gmk2j1vcX1h24q7t5ueka6pS7V2G7Kd5T2e6Wn+s+ZR8nL1/i/lw2oBNXS2QypjHPk8nmMjE9gfDKLvCSDMQZCAVEk6OuNYbA/hWQ8CkoIwFVYJxWZz1UXvCPagUrv2kAU7g7qacfArjn11PI8BrQX1T4rwTt1arH3ZyrqUZZZy0bbGMKs2ffhABgDYwwMHERwX1rU7x1qHxc7+1PlZfLWnig4IcgVJWyfGMfm0TFsGR3HWCaHvKSAEQJOid6DohApsPu8mdhjzlAgALeSWh9wvxdDox649pvTzbhj/PzycStzgEYgDAcEBNjx8tAI3pwaBCgzPNHgnbnDQX3WLiWKCva097qnnscdvgTo7/Xcz5p8mxkGTkMEToghpPU1ZetlRCogLoQQ1b1crQxHYqYjTFL3ekVRArvsavBN21zzo6ccXf43IdiwfAX4xi0NldONzN1/h8qBXjEMxjkmFRnsiAMcaxEDAFdUZH9xNfi6jSgwFZOKDM45coqM5aObMVEs4L18BjJn4PqbU6AUCSGECBWREsNQGMNgOAYASIcimBvvQZgKYJwjTKm5vm9MENEbikAw1lAWQ2Y4bZFSfHP3A3DlvkvN9M2GhEOI2dqhGopPPA/k8p7748cf1kixqsJrUqZmrAfAYa6xbRWBgfLBtCAIEEURBM7QoVNNt4xhprsezTp/txjFWhEOejr6j0Y9uqXP2jX3V4V6VDOJ3uNAz7ROu5GXM4DTIG685ykVwDiQL8qYyOQxMp5FLi9B5drEIZECyXgUQ/0p9CRjCIeo7XgahC6eYqz3WiBgdAdBO3YmRlvZw0UH7di5BO04NVQjBntpOASaJzCFMVmOgugRTryexUp5VnPe0j5jYjVgHwkQrkXlIlyAwoCCzJApyNgyOoYNW0cwOpFFXlEgM81+koxHsGD2YCAA10utIt9UhVmpRoSsVG63zr5X/l6C8Y6A94yM6macBAQEBHQLxOdneBe4/UodrO4xzgXUyJyZwGC/527+6HN1ZUtPcvc2BcrD0zaLvnC0TDickGv3XAa0sM8A0BuKIC6I2JB3X/c1RClUzvFGZqy0cTIL9rMrgEnnMWTBPGDBPPNvianoEUJQ7nmkrnJ6wd9ci8Q772OnaBwhSiEQguFoHHEPr2x+zU2IrnkbAJASw0iJIVBCoHCGn+xxMK7Y7zicM3sBBsJRc23lCBWQ0NMRAgxFYq6TRySmolDF+r/jchEFVamYrhKV7qnICYeDJBM155t74HHPfXR4AOLu82vOs1b8+rHU8tPGqNbZ0+5jDlEUIQrCtL/3u+WbM93XsZl0Qz2aWYPpvBrc498BnUFFr5cqtnunddtXevdbjwecE35UBkxkixgZy2JysghF1T4dnAPxSAR9qR4M9aWQjEdAwKEqMnw+QwEtwK3t7Q4qAZ1P0I7dQdCOAQH+2DUawKnTeY13zclo0MIoC5YlNogxCdrI30dnq0kXIkbmXFsn2PKj0NYKFimFCAKBa9HdCBHBQZAtSBgZmcDmbaPYvH0Cb6x9H+N5FgjAU0UlkdArjde+VguvgWDZHKpt34CAgIDppl4R1x46ruYJUoC5RmfAjgk98gDPffz1d4CNW2vP87jDgHDIO98W3HISU5HXhcS8qtQsKo7JRXPNGUF/dkRKIHmImAIhZjoDvnkb1MuuBncJnU1PORoKZ2CcY7tUQFIMgTzxPPjImCNtvbA7HgQATCoStksFAADZdw+QmcPOtLcsA3t0ufn35kIORabqIbUl7JpI4+QZ83w9cntDEUx6rLccoQKGIzHf8uZVBZQQLZR2g1TyTUqccVLteY6OQ17xiuf+eB0exc3A+m63hrEyg4cS7prWPpYRRbHl/eHAKNY6mnlt7d5mnQ7rElG+myYXAN1xbwG11aMa75Natpf+5mUeKtbkZYZQQiyRIIwfBwQCFUBekjA6mcHoRAb5fBGqqoIAECiQiEUx2NeLdCqJiCggoL0IvNc6G2vbMSO8qcWzLaA9qNauEniUdj6BZ3DrsYrA9gi59nR2wRicm2KvAAKRUHNCutleHlMmy8NL1xmtRYdRAkYBlajglAMCIEAThg0LLgMFJwJyCsGr67bgtoefAbVmHtxc1eP3QFoNG35hlxv5Wc8TCIrtjV872u8Rg+BZDAjoPOoN88yhCQfWXyNPfyPeuPWKuLXCODd/wbsugB5/uO9+dr+3B6QnsQjIsT6hcUnzvYDDVMCoVESRqRgIRyGS6ibrjevewikxXArprDMciZthit3ONz+eAgBsKeahGuLJm++CX/5Hh8pNDtoXtD8NSghmRhNgnGPLxBh4PdfXBb5xC/iKlwEAIULNctOTnCIle3Q52C3LkFVkjOmhswd0L+oiUzEnlqx4vowio6AqCFOKrIsIrA2B/Fs5JoimZ3EjVPIqjxxxIIQZgzXnm7/rYd/9Ub97vMl4jUPsA1oKY5DLdCGYefaBBUoREvUw5y7iXzP6w9UazNy2dxpTOX5oVRQra/6d2AZWjAlu1U5ya+flQrqhPQx2hHq47fPr11vHA7DZuLwcE4wvn3W/ER7aSG6OP0xbh7kFhDEQzjWBmFDIjGEil8e28QzGM0XIKgc4QAkQi4TR15vEULoH8bAASgCVA4wIsH59jTDRBNxRj4DGqUeACtqgffGyLRsEbdj5BM9id+DXhkGb1k4lRz3P/S6OmIQQEA6IhEIkFIJlRrg9LLQVY3GrasRgu26r977KBwpmOC6jAMYEPVVbRoMT0OBFUJlqBdmAgEawG8WC+yogYMdAM4M4f35Cru+vzb9VQX8jwEG6Bzhgb8/d7JFnAcndy9MP4YP+3pEcvOkW9lmxBCJU0ELyVBGtJWMRLt3CGMuM+XoSS1zr5A9HYphUSl6/bPlLKNxwa1laIggQTz/O/DsqiNokjIeedvUYrhV276OmiBcVRKRCYZCZwyD77lGWjq9+HcWrbgSgeeCmdAFWpBQFVUGoij6QyhnG5SKigoi+cBQJ0entPSoXkVW975t1ucma6udHJYEncd7pdeWb8wnRHd5vIajPWtfTRaXvjdsAVxRFCIJzosNUfrfc1m8KcCf4jteOr1iH9hJ7/eiWtu+meniJvX5UekdT29jBzzjpPL78/87zEmiGSatBU+vLGFEhisUiRscmMDqRQbYggXGAg0AQKHp6khhI96C/J4EY5QBXy8RqAHp650SlgKnBS6QI7M6diTU6R9B+nU/wLHY+9jYM2rJ5VNN/qtSHEswfBdWiODvTuUwdr2S3NbQiv7ISQmCEjbY7A1B7wh1
psFuNZ6bfha9le0BAJbw8ydtRwAkI2NGp1kujHgG3kjdup05I4obH73QXJKAtEU7w8QKWZE0ErpX+NMghi30StMANGJqoKamqr8A4LkvgAOKCiN6QvweqEUrZjU2F0nq/6VAEOVXB+rx2Xvmeh6He+2hZerL0UJBE3Px7p2gCPJsDf7y+tZYN7HkUVAVZRQb90PFlVmC2bgPW/+R/oMiaqD1oWb93VCqgyFQIVXhOC4RitsVLeFKRHCJsiFBEPLync4qM2bHa1+P1wu/dGzl8CcS5M2vOs/Dgk+AZ9zWgASB2/BE159ks/L49DOVjSu3/RmyLciO99XhBELQfpaXFIDH1Y6tuMKJ0k3GvG+ph9n/0PlC7evlWwixzl/TlOv2+slJPXTy9TnSv3jKvYJ9j3Cawl7YR0ytYLylKYaNLeVBoHjSmB69AQQQRCuPI5PLYPjaJiWwRRUkGZ5r3SzQsIN2bQH+6F8l4FAKlZneDoyT8cu69rl9Aa3C7r9yie3TTM9jNuLVl4FnauXg9i0Fbdh52EThox+bg1T9yi47i1icyOsxGBBRKCCiHJgYb6eBvhvKy7Rp9Gn+hWDe/EGL2twjh5QLwjnaT+IVvtv/t1ridZHgPaH+qFXcqTU4ICAhoPc3wxu00AbcejO9sIPwGVILsvTswY8Bzf11hoAEIHz6+8rnrytmbCBWwqZhDUgxBYc6VYXOqAg5uTvjwI0Qp+sNRz/29oYjmJayvE1xQFYSIAIUzxAQR/I+3gS1/yUxPImGQ4w41/zbWKuYW79164A89ZXoRK5zh4ysfxMfXLQeOWGKmYSNjUH92JYZVgpjLmrt9egjoiufiHON62Ggzb84xoZR7MadCYdfw2SpniAhCVUJzJaq5dxLnf7iuvLN/u8/7vIk4wofuV1e+zcb6/eKAeVGcYyagtOhBuUHeGI+Jugg83f3cbhoPN7MuO5qtoJkYz4erZxw6UAjuknuhW+oBNPasl41DPDx3HUKxx7jFPS10MdiIceRyXltdSuItBaEUhWIRoxMZjExkUSgq+tiCICxSJGIRDKSTSCfjEAUCgXAwVS19m7qkjTsZr2+6OVa0rD3bTc/ljkQgJnYG1fSvgzbsDGqZbBO0ZX3YtRjjetvFYLf0RjoApeU/oa0fTLXpdp7n9WovaoswV70t2SYABwQEdBbdLBwFBLQT9Qi5wTNZMmo2uq5xwI6B4LcW8Mat5vqyNTF3Fsixh/gmYU2+OSkhmBNLoi8UKQsDPaFIUDhDlApIVyF0GriJpQCgco6kGMI2KW926PvDUewU1Tx886oCcA52+R/B3nzXPI6ceCQglMo1Lkva+r0vvlZ9JS1wVcV7ty/DqO6pnFFkrBnfhlPPOw9ED+vL8nnwn10BMjruCI2tMGYeWw0jctExYOoNRZAQysNAbyvmHQOnnCJju1RoivgLVDa4R5ceWpf3r/zS61DXbfTcH//gUpCwM+z1dGP97LldmdL30f53+cA5FApBEASzlQODRUAn4zWD3/w3AN5BXcbgeWxfmtE2DmOysd3280pfabvmEVy+zerRYj3eakAlhABCCLLKMZkvYNtYBmMTWRSLRbNM0bCAgVQCA+kUelNJEKJ7FTfpmx/QWuwG7+Bd05kE4Wnbh2Zd+0A87Bwq2SCDtmwMNzHYiqeHLi8/nkALyayJwe7nccPadt6T8Awv4PIyuPaEunF2QDUG+mDGUkAn4ecdHHgJBwT4Ewi5rcPq9WtQPt8/IMAdsrSCUPu3++vKV/jY6eBxby/aVjzihBBslwqY0D1VJaZCYQwioRW9fu2MycUyj9cCU7E2NwkOjlGpiKFwzOHpajx/m4o5SPk8Jn56OdimbVrZ+tPIL1mkl4shryqQGYN6x4P1VfaJFZiVV0xP5XQogjuPPhOf/dRnAABcUcF/dT34+k2OQznnGJWL6PPxcrbTF4og5RE22+i3M86RUxXH+ztEBQxH4m6H1gwBfG8eEgkjeck5deWd/ctdvvvjHz7Od/90QAgBJVQbyHJA0Nc8cvP01WCwegNbvYWN/4uiqIX1tBw/VQbFbhoHNqMu030tunlsTqGFhesUDI9K1kURXrrpvmqWCGzaF0hpypXX+MgZ8cEZMc+evyEEG0ZKa9nd8wIoOARa2i/LKkYzBYxMZJErSFBU7X6kBIhFQhjqS6GvN4loWDDzZ7y0RnDZNePunlMBU0M13uDd/B3oJqq1tQe0nmbbsYLnsXsI2rFxvGzHpQgmlgiu1L3fZIyfDRuRVxqv96lHyQAQEMJBuL7MBqGBB7Cd4KYP6CYCISugG6nXGzd4DlqHYQQ0DIHBlzSgLmJRkCMP9NzN33kP/JU3a883EYN4gX8Y3lbcs7948wXs89Cf8PM3VkAk/qGc/fjl+2uw70N/wg3r1mBULgLgmBfvwTapgG1SHgpnGJOLeDc3AYmp4NDWDS4yFYPhKBTOURybgPrTK4BJbU3ZxGnHAgCSYgizogmIhACvvgX+5traCsc5mEU4zigyGOeYc8KxCPdoa/Tya24CX/266+GEEAxFYlWfrshUqB599SJTzfWSKSHY2bJGsFE2oYnv/0qe44nzTgdJlcpQ7RhDeXsdpFWveu6PfvAYkFRPVXlNB2XfW5REYDfBwA9jwCwIAkRRdEwimqpveTeNDbulLt1Uj041utm9MzuxDna6pR5A858RVxEXMNcItgrENeVhTmIv/z/gXGfPNT9B+07IioqJTBYjE1mMZ4uQZFlfSx6IiBTpZAxDfSn0JGIQKUCYHiFFDztMCIExlcF+3m66L7qJQLjofLzExKA9O4+g7ToPt/YKnsX6sTvmefWR/ezRhGsewaI+oVqwxTwzjjPGyAbVthN1m50XGMnLCW7+gE7D756txfgWEDDdBN+lgIAdC+FEnzDQANhtD9SVLznmYGDBPO/9QENr4LoR0cMfv5ubqNnr18qG3CRS8QQKTIVACEJEyzcdimC3ZBpRQURvKIJd4imEqRY2d2Y0gXQoApFQxAURjHOom7dAvexq8HwedMEuwIJ5EAhBVpXN9XPZvY/WVDb+4mtQN2wGAEzIEvKqAkopQh88RsvvlmVgjy53PXZLMaeFqa4Sxjk2F3IIUff5q0kxZO4zJqQYbCtqYbIbaQcrnPt7jgszBhE78yQ9rXN9ID8yf7rdd3/yzJOrK+Q0Uja+dNluS11K6zUgJpo3sCiUvLimkmAM2H4EY/P2opsm/3XLfdWKeviJsYYQXIu3cHke5f/X4OBcixRhX/fOTKELuIIYggqKgqxiLFPE9vEssgUJCifgIKAESETDGEyn0J/uRTQShigKYIxBVdWysgXvl84jsNl2F0F7di6BiNgZeEVcsBO0X324RTKx7/ebHF3yDiYQiBYm2vg50nj0j+x0pQdwvYKBX8gRr78DAjoR670eiGoBrSbw1O1OrOH/AgKaxq5zgT3me+7mr7wJblnPthbES872T2ATqxrl3/c8BH8+6BT8fO8jsaWYqzufn+12MK5YdBQunLMnUmIYCmcAgCgVwLkmdlYq91AkhryqgL35Ltivrg
dXVAinHA0ASIghc01cvnwV+MhY1WUr3PN3vDY5iiJTkRRDGIrEQJYsgjhrJyiPPgt2yzLX44pMRToU8Vzf2A0O7vDqtdOrr62cUUqiNgAMRmKIi81bM7fS56nnK58w/12LIURdtwHS8pc890eOPgh0eKDm8k4nhGgzmK1/l/dBueV6lv5tTWNd3kTQZ1UHInB91DOW7Za6tzPdcI27yU7STfWYaiHYTQx2O8ZPHNaiP5R70DDGtTCGZr7atAPr8VTPA0SEwoBMLo/R8QzGJnPIFWTICgMIEA4J6E3G0J9Koi+VRCwaBoEmBDPGzPy65T7Y0fDzKg3atPMIBMXOJ/D07gy87K9GezHGzMgZQTtWplr7tt9+ov9HCdWWWqJC1YKyFc65JgAHRvbasYvCAQFTjVtntpEXcCC8BXjRiIAb3Evdh/mugWZ6YZUOCAioEfGCD/nuV//svz6qJ3NmgZx0pG8SBtvCcA1yWP9MRAXRXHd2RA9RXAu9oQgO659piqV5VcG4rImb43IRuSq9aHvEMDjnGHnhJfBrbgI5aDHIzGGEqYCkIY6qDPz+x6vKj2/cguKqNdgtmUZOkU3vWvqh48FXvw5yzV9cj1MYAwEc6xZXQmasogdvQVWwtZhHiGqezzJjGK3jmvtRqZsVWXooxEW7uU4gtffZ7P+evP4W37yT53ywgZJPD0ZfwBSB9Y9Hef/A+JoYXxZiXmfOS88kIQShUAihUMi1f9HqMVk3GTuqqUMn1HVHa5N2xuwbdmg9Onnif6VytqIuHNAmzlX4Ltv3u40P7eNG7b2vTW4r7QM4GIi+brB1rTzt/xwEHAIYKFEggGmGUhBIkoLJXB6jkxlMZnJgjIHo6aNhEX09CQz19yEei0EQqGngNmqqXT9mGsA75b7Y0bG3k5vdLGjLzsGrXx20YWfhZcMO2rH9KftGWwjasHoq2cgr2dI552Z4aOtP2wlzjK0dS8pm4lEjgwB3qnWLDwiYTlo5szcQ8rqfQMANqAYOONb4JejSUCIB08uuc4F99/Te/9o7nmvKVkI451QgmfDcT0C0BVhaRJQKDQuS6VAEUT0UrkCItn5vlRhhhKRHngG77X4Q3Qt4WzEPRfd64Q89DV6U/LIBoIWLTgoiRErRp69vPDFvJhAOYfTnV0D1Q468AAAgAElEQVSWZMcxRaZiq5SvWfwdkQqQWOXpJlFdJI9QAVFBxKhcMMvWDHiF0M8kGUfiU2eXzZR2+6a6eaLIq1+H9Jy3929oyV4Q5s1qTkWmADfPLgGkLHxVJWHAmPFsXHTT2A8gHApB0ENeGdcx6LPURjCmDWgm5vsNpf5iJ9HJ457pKLdbuGevdFaP4KqOMVReyzGGGOzMW/tpaQg411NTah5IqBEikSAvKdg2nsX28SwKRQlMv1EpAdLJKAbTPRjqSyERj4EQmN9xI+IKKIV1jeLA8N2+VPtcBO3XGfi1Z9CGnUMlL8WgHdubSn2l4JtYHdXa2ytpksZ+kVAI+mS50j5q9qcI6UC7bbsIE1b394CAHYXpfu4C/Ak8dAOajenlG3TkAqYY8WP+no7qn+6sL+NYBMLnPlYhEWnZYoZxMWQKktsbEIIjhoBKCGReW180JYZBQTB5053g4CCJOARCkGeaJzHP5sAffto3Dz4yBv74c47ticMPQObHv8XE5ITrcWFCMTPqLcB7EaUCUqFwVWmHIjG8PLkdMmOm53WzqPTJTHzhfKA3CUYAFRxMNx7bQ2YBTiPHxO9u9M275zx/z/hOwUv0ta5fRClACNcmYxCu/dsFQRAgCgIopVMenalbvofdUg+gO+rSTYazbqpHt9DquviNM61/u4WGLhN6jV8VXjJlf1P377TVU9j41qgqQyZfxPbxSYxN5lCQFC1EIiEIiQJ6YhEMpVPo7UkiJApaBBJCoaoMDAJUAMzW/6r1+e2me6sb6Kb3746KW587aM/OwNpeQRt2B3YP76At3anGRm8dLxs/tzE11UXfUr+qtMTZlAnA9QoT9kp6bbdfhHpEDq+bs5qb1fqCCghoJu340gxExObR6LsxaIOAVmAIv45weMF9FjBVzJ0FHLC39/71G8CXv1hX1mT/vUBOOLxCIlSO9dsgIUIxqVT2tPUjTKjpGVttKGhA88aJCiI2XvlHYMYA+sJR9IglgZXf9TCgegvL/P7HAZuXMIlGIT66HOHxSfSFIg4NfVQq1KWrM85rWiv4yndW4/Sn7sC/rXmqjrN5UEXBw0sPhXj4Esd7k5HSTwWHCtt7lXMUHnwS6roNnnlHDl8CYff5097/axTr+E0AAbWGlIDV2K+tBaz9WVrn0WrEN/5NKYUoihAEwfX6tPKatUOfvBm41aNT69Wp5e5WOjkktJVuedaBqatLJUNm2ZIONm+WavIx6uFMb/UK1sI8wxK/yDyGEBAqgBCKoqxgbDKLbWOTGM/moahMdyImiIUpBnqTGOrvRW80hLDAAaaAcm4xqzqxC1Fe1yigPanXLhzQnritWxq0YXvhZtu0j5eCtusOgrasjJ+t3xr5yjEZDgDl+k9LbaZtugDsVchKjevX6M3qGFUSSZolugQ3ccCOSCBCBgR0B37f644LGxLQ0YjnVvAC/sPtgEuY4WoQLvgwMHPINw33Mew1g1QojKQQgsoZxuRiXXkwi3pWUBVkleqvh0AIZohR8LfXQ1JV5CzH8pExZJ9+3vU4XpTAH3J6CPNCAfz9zRAIRZgK2FLMme+RMbmIuBiquIavHcY53stnaupfbCpmAQCy0Jw3luZ86t+vJ8P9SHzhfN9xQZkojJIYzPIFZP/wN9/8kxefXUfJ2x+t7+jcVu3fpiCMkhDsdo6A6rB6p3cynV5+g66px3QXoIl0S5tMNV7vYS+7mpetrZJnjFXoLcsHhtdxqWdnzcdwJGEcKEoKRiayGBnPIFOQwEG1lekJEI9HMdSXxmBfDxLRMCgpRQashFtfILifOgc3wSKw+7YftdrpgzZsXyrZl4OJGd1D0H6N49aHMvx/jX2OUbLbLLpKmft3YqwrBXqlcW73yr9SWbzyqxYvZd0vHefcDCUT3LABBsVCAePj4xgfH8fkxDjC4TBi8TiSPT0YGhouC3VnpdPvoVqe3U4kMCQGtBvZbBYT4+MYGxtFPp9HNBpFPB5HqjeN/v7+qvIo+47r/2fgoC0WwDqRbCajv9vHUCwWke5NI9Xbi1Rvr6v4ENAAs4ZBDtsf/KkX3PePjEG97X4IH/UXil0JiRC/8gko/3SZZxLTltjCx4AQbV1UAk3AjVChpu+M1Wu3PxyFoocjzCoyEmKo8vn1/79fyOAnb6/EvHAcX/nAfkiKIQjLngCOOMhxDH/8OfBszjNPxjk2FrKYG++ByhlkVUU6FKm6TlYkpmJGtLowzkad/3H3g3DC8Fwc1j+zrnPaYag8Nkp+5wvg4TDAGayrHDoNTdr/CSnty/3pdrBR95DZABD7yAmgwwOWPMrHS9ZZwO2OdZxplN0wznPOTW9Ba71Kad3zND2BoUWpIIRAVVVP77BW0EltUIlu6bt3S5t0Sz0Y5xVD+
nYKU/VeaTVTfW9Z3/3G+Ulpp7nPbgR2sy14C8ruwVusk4W43rezJ9MMphSMaV7DqsqRzeaRyxOEQ2FEIyJCggBKCcICRTgRRzwBFCUZUrEISVHBGC/FoCalE5XdM5xrz4PtO+dXr4Dpx0tQdGtHr/QBU4vXe8S6z83TtNpjAtoH6zu20fdqt3zjOwX79Q4iZrjjNgHarsU6Js1Bk4C5bstwWCorzbq2Zl4dTgG4Es0UdP3yr+YFXqsYPlU35coXVuD6a65qLBNC0JNMoifVi/7+fuyxcCH23mdfpNN901umnh709qaR7uvDXnstwqK990Gqt7fuLP/6l//D448+Urbtxz/7BSLRaGNltTE6OoKXVq3CyhdWYP26tcjn855pQ+Ewdt55Dnbd9QPY/4ADMG+X+Q2d++knn8BNf/5TQ3nUw/d/+F++bdPqZ7lafvqj/8TmTRsbyiMcDqMnlUJvbxqzZs/GPvsuxq4fWABBEOrK769/uQlPPPZI5YQ+CIKAVEoTnoaHh7Fon32xx54LEYnUZ2hvx/uo3uskiiJSqV4ke3qQSqWQSvVixk47Ya9Fe2NgcLDRIrvy0//6/7DJcp/tPGcOvvHtf2z6ed5bvx4vrlqJl1atxObNmyDL3h5/iUQCc+bOw26774ElBxyIwSHN41FzaOOlf7tgiL9r330Xv/nvnzexBrVz/oWfwAEHHVy27We253r2znPw9W9/t+nnXr9uLVatfAGrX1yFrVu2QFG8w+wme3qwYLfdsXi//bHX3nsjGo01tSyrXngBN1x3dWkDIfjcpV/C7nvs2dTz2Pl/v/pvvPv2W+bfA4OD+N6/fr+l5zQQzjkVipcADIDf/hBw5EEVvXldmTMTwoVnQPXzvtSUKfdF5ZpIbygCzjmKTEWRqeitUjBlnEPhDGF9TWCRaJPLikwFVCAhVBaBAWBTMYd7NrwNAPju7gdqeb29DvzNtSAL5pUScg5+76O+eXFwc73egqpiXC4iIogYCNfe74pWGfr5/XzGzD9EadPEX1Qh/sYuORd0/s76wMs+uY+4Gge5LtSzdRtRvPPvnnmTRAwJy3rYXoPkThkUu3pz6f/m0DyjjQ3O8ZqeruxxtAng+vWhogiVMVMI7pTrE9B8uqX9u6EeRt+z0+sBdEd7GExXXfyM9dZ9biKOt4FYgNvIxpGelKyT5bs4tDn6xgdH71NJEiSpCEEQEKICEvEwBEHzphEjISQiITAO5PJF5AoFKCpzfqv1bxSlVOtLuVz3QEjsLCp5J9Z6TEBzadTOb3j2+3kVB7QHtT6Ljd4bAc2jmusdTJZyx65FukVyIoSAgwFuArBbRnZqE29aHzCymtk7ldJ32g3EOfc1+lfLyMgIRkZGsPbdd/DCCi3U326774GPnHkWdpm/6/SUaft2jGzfDgB44fnnAAAL91qED59xFubMnVtzfqqqNqVcbjDG8OTjj+GpJ5/A2nffqfo4WZLwzttv4Z2338KDD9yH/v5+HHXMsTjy6KPrEgwYZy2rYyvwminZrLztqKrS8PWRZRnZbBabNm7Ea6+uwd8ffADxeBzHnXAilh53Qs2iK2ON35eyLKNQKGDLls14843X8eQTjyMUCuHgQw/DB0/7UM0TJ9rxPqr3OsmyjHw+j82bNzn27TRzJvbeZ1/svc9ifGDBgmYUEwCg2O4zRVGblncul8OD99+H5597Ftu3bav6uGw2i1fXvIJX17yCO267FXPmzsPxJ56E/fZfAuIRhcBOs97tjeD2vrA/16ravOudzWbxkH69R0dGqj4uMzmJlSuex8oVz0MURey+50Ice/wJTRNoOZxt8acbrsd3//nfEIs1V2y2oiq2a93Ee7siw/0gxxwC/sgznkmUa/8C8Z++WFf25OQjQVa+Ar76dZ9E+szJFnvEE6Kty5tTFciMIVTFMyoxFaNyETOjibLthjcw4xw5VUGygjfwYf0zccm8vbBTJF4Wppnd+yiEL19k/s1ffA184xbfvARCkQ5FoHIGAmBWLImCvj5xLX3uUakAgVBTTLbDOcekIiMVCmNWNNH0vrzm/O2fp7BkL4RPW1pWJrvR188DIfvr633zT3z8I0AsVvG6deJYxg4hBBQcnOtCsGU7UC4El66pU1A29lFCAEEoW/fNml8r6IZ2CAhoJbW7BrQv3fS8T2dd/OwC1XjgVRNS2c+7yDq5yB72pfz8BCrjUJmCwrgMkQqIREOIhEIQBAJKgEQsjEQ8AkVRkS8WUZQUKIoKSilUaJPEVMa19YkJqeq6d9pErwB/vNqzm94n3UgwOaO78BubBbQ/wXfRHXu/Qou0Uvq36JfYjWaINvU2UrXH1ZN/o/VyE5879WZ84/XX8POf/AhLDjwI53/8QsRi1YXeayVrXnkZa155GQcfcig+dv7Hm+69Ww8vr34Jt/71ZmzauKHhvEZGRnDbrX/FA/fdixNOOgXHHn9C3Z6lnUI175l2f4ZyuRzuvP02PPrww/joeedjv/2XTHeRIMsynnjsUSx/9hmccuppOPHkU6a7SG3Hpo0bsWnjRjxw3zIs2G33uia8TBWqquLxRx/BPXfdgWw223B+69etxXVXX4mh4Rn4yJlnYZ/F+zWhlN2Dqqp44rFHcc9ddyDX4PVWFAWvrH4Jr6x+Cfsu3g8fOesc0wO7mYyOjODm/7sRF118SdPzbheE8z4I5bmXAK+ww2veBn/4aZClh9aX/xc/DuW7PwUmvdu81eKvlf5wFJxzZBUZHPAVb0OUeu43vIElpiKvEsQqeNP++56HOLbx5avAN54MMnMYAMDufLBi+bXzaeGs43rZDE/ezcUcBsMxiBXEbSMcsJf4CwAbCzkMRrT+YCv6CxVzTPcg/jXnc+f09nULS0Yg3/8E1LfXe2ZPZ81A6NSjoRqSCdfD8nuMWexjuHbvQ7lhrFNEAVO4NephnXZSup5MN96XQkRbrzUlBFQXgRnTJkS02sjaKX3YHYluaZOgHu1HN4k209kubmKYX3msXi5e4q6XsGxMcmNgejxoIz0A01DqXj4O6BMCBSicQc7mkaMFhEUR0UgIIVEEAUdIFCCKcSRiHJKioFCUIEkyGFfh9r2qhkCA6lyq9fYOaC+qnZxRTdqA9sRruZigPduLWiOL7sjt56g74WYEA2oPZdD0C8VJWceqUzBeBF4/L7opPMSK55bjsp/8GNu2bp3uopg8+8zTuOxnP8bIyPZpK8PIyHb8z69/id/+v183Rfy1ks1mcdutf8VlP/kR3lvvbRTsFKzPg/094/ejlIJS2jHP0sTEOK6+4nIsu/fu6S6KiVQs4va/3YLrrr4SkiRNd3HaljffeB2X/fTHuOqKy129haeTN994HT/64X/g5pv+3BTx18rWLZtx1e9+i+uvuQqZycmm5t2pvPPWW/jJf/4Af73pzw2Lv3ZeXLUSP/rh93Hnbbea4aSayXPPPmNG8ehKkgkIF5/lm0S5/lZgi7e3tu93J5WE+MWPV1WUqfoiEUKQEEMoqIpvv1MgtGwdYDf6w1GEqWaszCiVvfknFKkUQFFlYHrIZ/7mWvA1
b3keZ/DhFffi0Mduxsai8znaKZpArkKdAK3+/S4ho2XGMCoVAACzYgkz9HVzqc4ol/jmZ0ASlT3v3foxbMs25K79i+9x8U9/1HkcOBgBGAFU/d9WrOOUSmOWdsfoCxq49Qm9xrD2vqQgCBBF0cxvKq5LJ1/7gPamG+4tY73vbqDT37XtiPUdbmAY6Y19Xt8E63YvWwQAUFBtkhAh+jp51nzcv90EWr+LEK715wkBCEVRUTE6mcPW0QlMZAtQVG19Y4ESxMIh9KeSGO7vRSoeQ1ikZv6N3je12CkDpo9q7VlBG3YuwbPY2djbLGjDziZ4DjXs3x7Ra4cdr1l29v3OfVWswQH3DlA7Cz9us/sN/Mpdq3d1Lddgl/m71rS2JWMMmclJjI2NYusW93B+mzZtxM9/+iN853v/jP7+garzNvjAggVI9/XXWKYJjIyMeIY53fD++7jsJz/Gd773z+hNp2suUyOsW7sWl//PbzAxMe6ZRhRF7LHnQszeeQ7S6TR602n09KQgSUXkslls2boF769fj9deXYNczt2baf36dfjFz36MCy76JA60rXtZLXvsuRDJZLKuY6slFA7XdI+2yzMdiUSx9z771HRMsVjE+Pg4tmzejGKx4Jrmztv+BqlQxIfOOLOuci3efwnEGjy/ZUXB5MQ4tm7d6iniPf/ccmQyGXzxK18rG0RXy1TdR7VQ6TpxzlEoFDA5MYGJiQlMTk5UFN1WvbACr7y8Gp+4+JK28OR+5qknceMfb/ANa5xIJLBw0d6YMWMGUr29SPf2IRaPI5/PIZvNYuOG97F+3Tq8+cbrnuvWrnhuOd5+60187tIvY/bOO7ufJ5nAkgMPqrkOOT38tJWd58zF8IwZNefVP1D796cWXnj+Ofzh+msrru+7cK9FGB6egd50GqneXkQiEYyPj2N8bAxjo6N4+603sPbdd12PVxQF9y+7Fxs2bMAnP/XppkeyuOnGP2L+rrsine5rar7tAjl0P5DHnwdftcZ9v6JC/c31EH749fry32cPCJdeAPW3/uugT/UwYjCiCYzjchFhKji8eDnn2CrlMRzxj9YiEM28KTEJRUYR8RFOBUKwXcpjMKydmz/+HPi5HzSF4EqsnRgDFQRkZRkSU8E4L1vLNxUKg3GOEangui4w5xwbCznMiiUc+0blAvpDrYsCQwDwKmT+2FcvhrBQW0Kg0uxjt/25n10JFL0nZ4WOOADi/ntZvJCI7p3kNFCwslV0LbEsib4QTx2eRtONdbxpHyMaf5cH6gQADkK0rV5jKlEQwCh1fFtbtTZpN3kHdgOd7nnqFk2gU+sClERggvarB+FOH4aK3/8ued7b7b3lFaXQEHXdDPde5fcXXQm0BT/MnMCJdi9YMteO1ycW6ZsAEFCqrT9ckGQUZRkCoQiFRcSiUYiUgxAgHosgHotCYQySpKBQLEIyllUhROt/cGMZA802ZoaLtn/xarbLBrQb3WAXD/Cm2nYMPImnD793qPHvwDu4s6nkwNnNmHXnHODwXgPYjr3jVXWYMZfNlV6E7dAIrSpDK2cgHHPscTjoYGf4vmrYvn07Vjz3LO5fdq9DlMxMTuKq312Or3/rOwiF/NeQs3PiSadg730X11WmrVu24Lnlz+LB++9DoZAv2zc+PoarrrgcX/vGtyCKVd/GDfHiqpW47pqrIBWLrvsX7bMPDj74UCzaZx9EIpWNk5IkYdXKF/Dk44/hzTecaw/Ksozrr7kK27dtxcmnnlZzeU897UM1r2/aDs+eG5U+zLWSSqVw8ac/W9exiqLgjddfw0MP3o9XX3nFsf++Zfdg7rx5WFyHiHjRJy6uSxTinGPtu+/gyccfw9NPPem4Nq+9uga33fpXnHm205uoEvXcR62m1uvEOcf69evw8ksv4uXVqz3X65YlCVdfcTk+fMZZ0xY6m3OOu+64Hcvuuct1vyiKOPjQw7DkgAOxYLfdXUPFM1v7ZyYn8ezTT+HJJx7H1i2bHenHRkfxq8t+hk999nNYuNcix/6hoWF88pLP1FyX9evWOgTgQw8/HEcdc2zNebWSvz9wP/52y82u+0RRxCGHHYH9lizBgt12r2oSxejIiP5ufxSbNzm9yl9+6UX85pe/wOcu/VLN63T7kctmceMNv8cXvvzVtn2X14pj1uLnzoP8jf/0FM74u++D3bIMwtn1Pb/0iAOAzduh3rKscmKn+tRSekMRbC3mHQIwIQR51Xvigp3+cBSMc0hMRZGprt7DCSGEhGDp7xUl8L8uA1++qmL+Cme469DTkVcULEoNQGEMo3IRjHPEdY/mqCCCEoIwpZCY6vDizagy0pbQzwVVwaQiYygSqyh0N0o1PYvIhR+BeNSBFQ02Xv2U4s33+oZ+Jsk4Yp87Hw5501yfsPx8VnO1vRjWY9yMwu36rrCLv9rf5debwC38J6AJwd4hNikAUGqGgzbzaOHYr12vc0Bn0a33UasmYDRCPQHs2rEe9dJu763y93xlW6JXGq9QvKVtpW8Lh/69sCQ3RFjrOcqzK/2hMAalIKFQKEIUKGLRKMIhCoEKEAUCMR5GLBaCJDMUixKKkqxP6ir3dNbOq4nBdu9mP3Zko3enUItncDPyCZheKjl8Be3YHtRjew7arjPZkSbbEJDqBWCgcsfJmqZW7LMsGsmrXXEzFLVLzPKBgQGcePKpOPzIo/GH66/FSy+WG/nWrX0Xd91xO8446+wpK9PQ8DBOPe10HHn0Mfj9tVdjzSsvl+1/5+23cO/dd+L0D5/R8rI8+8zTuOG6a1zba/bOO+Occ8/DbrvvYW6rRpwMh8M46OBDcNDBh2Dz5k24+aY/uwqKd95+G0QxhONPPMkzL7e1Cb1CF3ULXu+gViOKIhbutQgL91qEVStfwA3XXevwCL7h+uvwgQW7IdnTMyVlIoRgl/m7Ypf5u+Lopcfhqit+6/Cgf+iB+7HXor2xx54Lp6RM7QQhBHPnzsPcufNw6mkfwvZt23D3XXdg+TNPu943t//tFmzZvBkXXPSJKX+G/vynP+DJxx9z3bd4/yU448yzPdeR9XoGkj09OO7Ek3DciSfhtVfX4KYb/4RtW8ujPhSLBVx9xeW49MtfxQcW7NZYJTqIu+64Dffd4x66fd/F++GMsz9aU2QNAOjr78fS447HUccsxWOPPIx777oD+Xz5JKb169bilz//Kb7xnX9s6nvi1TWv4PFHH24rkb2pz1BvEsJFZ0C96ibPJOzW+yEsXggsmFfXKehZJ4Fv2gb2ZIWQ2tPweR3SvYFHpAKSYsgUTufFUzXlQwlBmAjIKDJkxiBS6qiOyjkKqoKEvoYvu8/9vWRlVC6iLxTBrvHSxAaRUsyIlkTbcVlCRpExGIkhQgWIhCKjyIhSwVwX2BCljfFGTlUw6OIpPB2IJx+F8EdOdGw3yuoUJG0Rg95ai+Kf7/A9R/yz54Ek474Cs9dYyb7d8F7isAoahndRee+xXcdfRnkE/XKoNiHYa3xFiDPcs5mXIIByDlVfG7jVYke7iSk7Ot3UHt1Sl24RT6d4blhLadd7y82O5iUC29NYvxl
+31Fi+T6ysjz8y2HdTiwHSIoKKZMDZypisQji8ThC+ty3aIggGooBiENWVBQKBeSKEpiqgnGACgJABD3CRXO+1e36vQ8I2NHY0UPUdgrVTrgJ3qmdTbv2e5qFNqnNI5xKNe7w1o5VtRfKmr+14+QWVpnpA3O/n1esfft2r3RThdt1tV8L+37Psreo/IlEAp/9whdxyKGHOfY98vBDmBj3Dn3cKnp6evCFL30FB7iEQn7owQcwOTHR0vOvX7cON/7h966d+zPP/ii+871/wYLddm/o3poxYyd88ctfw1nnfNTVo/lvt9yM5c8+U/F+2ZGZjmuxeL/98Q/f/LbD47tYLOC+ZfdMaVkMdp4zB9/6zvcwY6eZjn133Pa3aShR+zEwOIiLPvkp/OM//xv2XLiXa5qnn3rC0wu3VTz2yMOu4m8ikcCXvvoP+MznvmCKv1z/Mc7Nn7HNjz32XIjv/NM/42CXd7wsSbjyt/+DjRuau7Z5u/LSqpWu4m8sFsPnv/QVfPrzl9Ys/loRBAFLjzse//L9H7reZ9u3b8Pvr7266WsC33brLdi8aWNT8/TC65vUyu8TXXoIyB67+qaRf/17IO8eqr8ahC9eALLvHpUTAqjOZ7S59IUiGJdLXtCFGjyArfSHoxApRZGpmFDKvaoFQlBgKrJVrBkMAFuKeaRDkYrpZkTjZtjnnKrg/UIGSTGEjcUsxuUi/r71PWQVGTlFxlYpb5azHfo6wv57IfaZj3nuN55lN4MzIQQ8m0f2p1f4n2O/vSAccYBje6mPScA5gTaEo/rf7mMo48eIuzcbY7xsLGWUt10xrqMAooXkZLxsuz0t4P5utXpPiYIAURBAXTzGmk1g5Gsvpsse0Ao6vS7W8rdDPbjHr1qs/fJOpx3aw4tK/Uwvu1qlPqojagQpXydYt16aPy25y51CStdPEARQSiGIERQlhtGxDEbHsshki1CU0r0iCgQ9yTgG0imkUz2IRyMQKAFXZUf/uhlt0w420oDG8bOJB3QOlXSNgPanXbSngPrp5vYzYxlW6kxYB8vWf1NKm2Lk88rDTzA1sDeOlzDcaS/S6Xp5UErxsQsudKwHKUsSHri/irCILUAQBFxw4Scwc9bssu1SsYgH7mtdmbLZLK783f9ClssNoOFwGJ/5/KU47oQTq15XtZKRnFKK4044Cd/67j9haHjYcfyNf7wBmzc7w4kGlDPVwvjOc+bg/Asvcmx//NFHWj45wYtkTw8+87nPIxwpN8avffcdrH7pxWkpUzsya/ZsfPErX8MJJ53suv/uO+9wRB5oFe+8/RZuudnp1TgwOIivf/u7ZQJio9+DSCSKj3/iYnzyks8gHC6/R/L5PK67+gpIkvfalN3Atq1b8cffX+fYnkr14itf/xb2WrR3086V7OnBZy/9kuskptdeXePpgVxL/lZkSeOiyZ0AACAASURBVMIN113ru360G7WKuNMpFImXng+EfILYjIxBufL/GjvHP3wKZJfZFdMB5aFppwJCiOkN/OL4VvzDi4/gz++9Vl9eAKJUgMo5FM7KajIQjjpCTttRuCayDUdiVXs9GfdOOhTB7Ki2xnxfKIIzVyzDxSvux92b34XMWcvDPdcC3XUuYt/8tG8a6zNhHYcA2ns7/8trwEd8JlLGIoh/5RNmer93ffk2qxhsCMTlaV3HQdDEYUY0r1oVvCwscrtCdGO8YOt/u4vA3PV9ZvV9ppRCFEXLeo6tq3s7X9cdlXa/32uhU+th2qAwHVOqymnW+Tnn4JbJNZ1Muz8jXv1VQ+y12mrsdkVKqWnPrJRnaZvb98Yafc347pfKURKetXSUEsiqinyhiO0TOWwbm8RkrgBF1e4ZgRLEo2H0pZLoTyXR1xM3nQRaNWHL2l/pRsP3joKfTTygs+g0/WJHx2u8FojCnU+3tB8FnKKvW0fHi0oXwNqRsM4wd1w8DnBW+tmD57gO3uv0NPHzHq5UF690dkNPK+HcfUZpM88dDodx1jnnOra/8HyFkIhV4NYpse9zq0skEsFZ5zjXMF2x4rmaBf5qJwdce/UVGNm+3VaOKL769W9i38X7lW1vlhfUznPm4Mtf/bqrUf+6q69surdYNzNVAsUBBx6EXT9Qvk6uLMtYvXr6xNadZs7C0qXHOba/sKLxZ7ibIITgI2eejY9f9EnHerqcc1x/zVWOd0CzmZycxDVXXgFFKffimzlrFr7xre9ixoydADTfo2DJgQfhk5d82vGcbNq4EXfedmuTztJ+KIqCa678nSMsc19/P77+7e86Jj81A1EUcdHFl+DIo5c69t1795147dU1ded9+ofPQCKZLNu2ft1a3Hv3nebf0+Gp21IG+yGce5pvEv7si+CPP1f/OcIhiN+7FBgeqCLx1IvABs+MbMY9W9bhl++uRoFpon+RqVBr7BP2hSIQCEVOlcu8ixXOIDH3yQQcwOZCruYyG2UblYvYonv5CoRiUbIPAJASw+itwpu4UartN9PhAQz+4GsIx6IgTPXpa7KyUJHWMY9898NQV/o/57FPnwukkq77/IwK7um9gwWVjNFama33LuOdYSw0J08CoD6qUckzq3J+AqUQBcFzMnSzrke7X9uAzqYb7q1We9ASyzujXi/fauDQbWBd0CZAZ9xbXn1bv76uVVB1czRxprfkCWfIbzdbpXkgZ+X7KQEIwBiQLxQxOpnDtrEMipIKlWmZa+sHRzDQG8NQfwqpeBQEWhQP6HZUe32a+Z3pBqN3t2O/170I2rE78GrHoH07C792Ctqw/ekUUdicF84B2pfuQU8yjp5EHIlYFPFYBJFwCJGQiJAgQCCAQLTBNQWB3dfRbQZatf8u8zoG13rjhENbssk/FJBfh64aQ6f1GKsgaRcn3cRie/01I4rTI3nKsdWhUfbYcyF2njOnbNvo6Ag2vP9e9UWC88FoxPC8cK9FDuP86MhIWcjSeutuv4deXLXSdU3eCy68CLvM37WlxvOBwUF8/tIvIRQKmdv22nsfnHveBVV7HAdoaG1ja58WaB1uazS/vHp1809UA0uPP8Fxv7zy8uq2/ThNJ4cefgTOOfc8x/ZsNou/unjmNpP77rkbY2OjZdsikSg+/flL0dPbaxrCWsHe+y7GGWeXJtaIoogjjjoGS487oSXnaweefvIJvP/e+rJtlFJ88pLPon+gGrGvPgghOOuj52KXXcvDF3PO8beb/1L3c5nq7cV5F1zo2P7Asnvx7jtvd564WyX01KNB9t7dN41yzc3A+gZCmidiEL/3eaA/XUViy5pvU8gpO83DObMW4F8XLIGgnz2jyNhS1ITZnKpgRCqYnrp+7xICICGEwLVeORjnCFMBm4s5x3EKZ5CZitkxd8HSmi6jh5GWmIq1uUkUmDbZJUoFc23fmCDiN4uOwtqTP4WTZ8yr+TrUCkeVa/wk4hC/+WlkRRGMEEQTCcSiEYhUABjTx0YEnHGAa99bZunrcs7BXn8HhWtv9j2NsHghxGMO0cpWhWGn8uRJre/jNyjVtpcLxVbPWPvYqB0hHhOGy7cBJXmHgRAOEO/xY0gUHZ5Wxv
5m0q7XdEelne/zWumWenjOYmk0W31o2MqrZL5fuqwP1kn3VrU2Gj9boTXaYdk26F9ZczvVfuZ258+uFhvHUUL0/aXg0uOZLLaPTmBsIoNcQYaichDdFpuMRzDcl8JQXwqJRByCSMG5HsXDFvWilslM1X7jOsXovSNTjY0yaMfOxs2pqmMndncJbte92ufQrjkFbdiZtOt71bifxFg4ZG7g+kxL6jLrTWUcnHGo5k1pCKMcqqpq/+Ylwc9whuCwin7UVdS17tNmSTIQaB0hN3G1dIxbqC/3v6sZwLttr3UGeKUZg9Wct9GbpNbj/V4ue++zL95bX24of/vttzBrdv0eUtVeP78yvf9euQj91ltvYuasWWV5+OVVzYv4rjtuc2w/6pilrmE8W8H8XT+Aiz75Kax4/jmcfOppmDN37pScd0fBOgGkGeyx50KIoljmxfnOW281Je966enpwS7z5+NtSzkyk5PYsmWz6VUaUOLIo4/BO++8jWeffqps+4srX8B769c7JsQ0g7GxUTzx+KOO7Rdc9AkMDQ1PSadh6XHHY3R0BABw3Aknore3GrGrM1EUBQ/cd69j++kfOQPzd/VfV7YZCIKAT37qM/jpf/2wzAN5w4b38dKqlVi8/xLf472+Wov3X4JDDjsczzz1pLmNMYY/XHctvvNP/4JINOpxZGcjfvlCyN/7OTDqEW5fkiH/+HcQ//NbIOke9zQVIEMDCP3H1yD/x2+AbSMV02sOGxxTJQXPifXgsn2OKttmrLELABEqQOW6ZyoBNhSyiFABQ5EYCkyFyhjiYqistMZavllVgco5do71lL2LikyFwhkSQgh2OIAxuQiBEKTEMCZkCRxAEiGEqIB58VI7VAov3QoISgJtRdIphP7lS1BnDEKVFBQkGQQ5RCJhJKIRRCMxSLIMWZZBBQGMcd2QS8yBPB0ZQ+7Hl1c8FR8dA8/kQJLlYa/9JsG6TcK1pyeEwlgL15neeRyHy3iKc31Mph/h4fA+HcYK45wCzFrq5SRm9Tg3vJ2tnluAIZC7lZsCIIJgjnsNAqNM99Mtbdzp9TDtUgBA7FM9asineUVqCMa5KRh2Om7fnnam0pjfbb99cpDdkOv4Dpd2mH8b97A9P7diOOyBhAKUQlIYJCkPQikiIRGRkIhwWNAnKxGExDCSsQiKsoxCUYIsK/p3q9ze1crJTPb8W3megOZif5aDduwc3NqFMebswwft1xFUqwkF7dl5tNN71XQN0wRc7y4yJRyiCIQjFJGoiGg0jEQ8hp5kAuneHvT39WKgvxeD/b0YGkhjqL8PQwNpDPb1ob+3F6lEAslYFPFIGGGBIixQiASg4Fo4NV09JiAQiFC2FkctyrnXrJdavILtM/ysv2qEQ7/w0lOJ2/ndyuNXxrnzdnHkOzE+PeuaGszbZb5j2+TERFNnO73w/HPY8P77ZdsGh4Zcw2K3kiUHHoTPfP7SQPxtIc3y5I5EIthpp5ll2yYnJ6Y9ZLfbMzxdaxN3Ah87/+MYdhHH77GE020my+6527HG+IEHH4L9lhzQkvN5cebZH8WZZ3+0q8VfAHjmqScxOlIu4s2bPx/HneD04K+WWvsVA4ODOMNlOYNld9/VUD/hrI+ei77+/rJtW7duwa1//UvdebY9yQTEb/ivy4rxDJSf/A4oNrCudV8KoR98DWT2jIpJDW2sXYZnAiHoEcMIUy3E/c6xJPp1gZgAKDAVij4G2FDIYkLRrpPEVET0Ywg0T+JRqQAAyCoyolREUQ8NnVVkrM9PQtb7vgIhiOvibn84agrS031NCK9B/O1PQ/jXL4END0AboJS8uQqFAkbGJ7B9bAxFSUI4GkUsFkU4LIJzBlXV3umirCD7g9+AT2Yrno6t24jcv/83kCvUXC+/MYa2nZb9SmsFV5cfJ5q4yox1gmsuYWsxx2woRasyjO/eGN7A5QZy699UDwlNmzS2cC1FG80ODyjRLW3SDfeX6VzQBXRTXTqRSmN9L3uAn2ddpf6Efa8xIal0mPv9IAgU3FiSgWhONXlZxXiugK0j4xjL5iGpWmQLAo5oWEQ6GcVgXw/6Uj2IRcKgBIDutGMta3APBhhUY/9qN0+2gNrw0wECOo+g/bqD6WpHUwDO5YvIZHLIZPOYnMwik8khm8sjny8gXyhAkhRIMoMsMSgyg6oaHsCltbCskU0o0TyJQyJFJCwiHosgEY8i1ZPAQH8aA/1pDA70Y2igH4MD/Rjs60F/OoG+3jhSiQgS0TBikTAi4ZAWipqWz/ys9eXVrItr9S6tJBp7dSDtP7dQ062mkiBskEqlHMdOjI/XdU43oa0ewa2ZZfLi7rucYs8pp55WFpI5oDtpRBDu6S2/NznnyExONrN4NZNK9Tq2jTf5eekmwuEwTv/Qhx3bDS/gZjI2Noqnn3yibBulFCd/0H9t04D6YIzh/mX3OLafdMoHq/q2e/3q4eBDDnOIte+9tx4vr36prvwAIBaL46KLL3GU6cnHH8Pql6ZvPfJWQ+bvDOHTTkG9jPUbofzimsZOlEpC/LevAHNmVkxKdB/g1q1g2BiCfo9EqICBcBQhqoUgHI7EENVFX4kxbJXySIohKJzhN2+txH5/vxFfX/Mk+sNRbCpkkVO1iBcRQcDsaNLMJyWGIZI2W66Ca0JmVc/sjAEI//5VYGhAe845AbGsjUuoAA4ChVHkiipGxyYwNj4BRVEQj4aRjMcRDYnI/ui34Ju2VV1Etm4jst//FVAoelfDZ/zjtc/b6FOdEFx9flOP3avJ/ME5ZnOkIeWuzMaSGabnpO5BJYoiBH1t4FbN2G6HaxlQjn2idCfTieW3P2/d0A5A+3gkN4NObRN7H96tHtZ7z/79sNr67PnZjyOEaJFBAIcHOCHeAhxTmXmu0n4VAAMIRVGSMTo+ie1jOWRyEoqSCsa1tJGQgHRPHAPpFNKpJOKxGARBML0DW/Uds9NOfYWAxnBzHApoP7yebbd2C9qyM/Fy4gvasf1xG0+7OY+2Emp2AowbSfcCUJkm8ioqg6IwSLKCQqGIQqGoicL5IrK5gvnLZPPI5grI5YvIFyQUJQWSrGphSFRrpWCZhQZQSiAIFKGQiGgkglg0ikQijp5kHL09cfT3JjHYn8LwQBrDA30Y6utFfyqJ3kQcPbEIopEIQqIIgVKtE6XVQqsP10JVg+trmVnX+NW3aX+rAFMBlzBfdkyx26OD59rpswnCtc60asXD7Taosp7XuBFV1TnPv5Y+o9HRdatvvZ3PVj8UG95/D5s2lq8ZODA4iIMOObSl5w1oT/yeW7eOlNvx00nQGaid/ZYcgJ1mOkWe5597tuG8uf5jnGPlCy+UhQwHgAMOPBjDw5W9DAP8cRNq169b6/D+nTV7NvbZd/GUGkQALRT08See7Ni+6oUVDeW7YLfdcezxzvWbb/zD76d9MkorocceCnq4v9c8f/kNqP/7p8ZOlIgh9G9fBhZ+oKrkBJq4Nt2er9UiEmp6CifFEGZFExAIgUAoBF3QpXplZseS6NNDRYv6GnbtCq+lEXadA/H7XwPp7y29E7TOrENI1
Ca7amGWVZUjl5ewdTyDscksxn99PdRX3qy5rOyd9cj+4DeeIrCbIFJJpLXjLQSXBGHn/vL8GCn9VMMz2CX6UavxGl9QSiGAgHKA6h9ed+8twPBvNo41f7QkDgi6N7BAqT6+DPpWOwLGZIDp7ssHlPrP1Tx73PJrR5hhiwreI9OOn3NCJdudVaC1isnWfBzisU0Q1mywVq9g/W/qLAfV1xmG5XwKU5ErFjE2mcXopCYGywoDByBQgmg4hHQygqE+TQwOh0RHWbltuqJ5X3JWtq3R+7WSfTMQMdoXt/vf2maMc6hBG7YtlTSIqXREC2gMv/5o8E5tb5xjUOdzaWhxzWtHI1KIZt9xnZ7v18lx21cSC1UoigJFUSBJEgqFAnL5ArK5vEUkziOTzSOT1TyMTcG4KOtiMUcpYiop+xHCIYoEkYiIaDSEeDyGtC4SD/SlMNSXxmBfCgPpNPrTvUinepBKJPD/s/edgXITV9vPjLT19uuCCwSDDRgbd9ObTQeD6RAIYNMJISThJaS9eZPwkQQChCSE0HsNvTebXkMHV1wwwaYZl1u3SZr5foykVdtdbbtbvA9RrnckzRxpRlPOc86ZpkgYYTmAoByABALCOKBqIBoDGAdnFByS2GsD7omb/d3ZLQWNiV8hFVKMF5GnQqeI8g2rd6dMvb3uULFt7R15luRGMY24xyN8bVNzU7EimVi8aKErbeY++0GSpJKV0UDtwmthaMAZWplSiuaWwvadLBW8wj03NZXue6lHEEJwwEGHuNKXLl5cVL7cNEwSfd8Sr75mv/2LKmNTR7bxdMniRa60Gfu4ydKBwi677Y6wY2/eJYsWFT1RnzX7CIwYOdKW1tvTg3vvvrOofKsd0rkn5vTOZW++D+3fTxVXUCSMwG/OBZ0109flhIjQubUMAuCibadh5f5zcOXY3Sotjm8YRqF+59hkxi6Q//BToIg5JeVA6v6noLz0dsF5sOWfo/8PVwOp/MKW50sG51pUptPd9+WSodpg6tUznc/QRowxxXgmSZIgy7K5bnKi2Oevxne3qaNe6qRav81CkLHPQvWSvl6oNXkzoV7alYFc40GmdKdncK4yrDo49/nMThdOUtmAqqroj8fR1dOLDd196IsloDFRN5QwREMyOlubMKSjBa0tzQgFg+A6cce42+OZW4y/nGNhqdAgKeoDpilBg3iqeTQIxNqHs+4a9Vh7yLZuLyZPc8bBPTpsv5ln84pzWsQZ3r+cQ3gYqyoURUEylUI8kUR/LIa+/n706uGo+/vjiMWEx3EikUIioSCV0nSiOG09KSZJwlpOlkTo6XAogGgkhOamCDraBUk8eFC7OAZ3oKO9Be1tzWhtiSAaCSEcDCAoi3DT4Ex4BVvfB+wTMeO5rJutO70B/StX/IWeLAWcHYF1YufE56s+c6W1tblDypZCJr/tbdVnbpkGDRpcMlm8QmVOmDipZPk3UPvw+jYTiTi++fpr23VtbW0l+24LxSqPb7iU30u9YocJE1yL8jVrVucdbt6w8jejTujpqVQKy5d9aru2o7MTIzffvAipaxN+xr5SjIdehPu48TuU4hEKQjAYxDbbjbWl9fR048s1a4rKNxAI4OS5p0OWZVv6go8/wltvvl5U3tWOwP+cDh4NZ72GPfEi2Lw3sl7jB9IJh0L66VwgIOe8lphUZG0vvuQMispqAzECAYlfvu6RzjkxdyhxH+Avvw3+6Pzs8oWCOfNhKz5H7OJ/5k0Cm3JkWPznIoN1CWHYCHtf750Psx7G2OcRXWmglBA27yykrZ6dY0iuccaq2OecC+9i/Xc5nqmhqKk+NOqkemCNpGP19K3V2qmXtlVvz5FtrZHrnDFeeEUA9LrXTLflbehSrWRwZr1huiwKTihUjSGWVLChuw/dfXH0JxSomvhmKAWiQUkng1vR2dKEaICYHslOXa7zvZQSmTygGgRU7UDTfcg53JFhVM6gMgaNMzBwMNRuX13PyKVbaXyTtQVrv+pVd406rF740XcWU5eempx8SUwvQb0IUWcjdJbpdHVmlnDUqqZBURmSKRXxhIJYPIX+WBJ9fQn09iXQ25tAf0xBPK4ipQhvYlXVTIKYcw2ABkIYJJ0gDgQkRMIBNEUjaGmJoK21CZ3trRjU0YFBHR3o7GhHa3MUzZEQIkEZQYlAIoBMREgxaAyEMVDOCw6bkO8kKltj8JtTtobkhBcZOmabbX2WlF2GbJ6UTrmsx8IFH7uuGb3NNkXLBADxeAyfrVxpSxs6dDMMGtwgzBrIjiWLFkHTNFva6BJ8K8Wgp7sbX/z3c1taa2sbhgwdWhmBagiRSBSjttralb50SXFewAaWfboUiqLY0rYfN74keVcC/kna8hg25UJ/fz/++/nntrQRI0eitQwGTflgWwcBDABLPTyV88XIzTfHIYcd7kp/+P77sX6d/z1Jaw6DOxD40ck5L9Nufxh8Uf7heZ2g0ycgcOnPgeFDfF1vEsE1FBa61iDUpjyj54wLw4dCvvxXILtnDyHuC5+tBrvz0dzX/egHiFx0Rs7LtE8/Q+ySfwLJwkhgA5kUuV7rO3u6kSro0/S13oa/HNwM3MQJTJWgIINzb7NTLjiJYGu6Hcx1GKSv8x1SS0ho63sth8FuA9WDeqiTelL+ufRJlqOW4De0dS2g1p/DajQF5NC9+SV2c51Dets0M0Q0cXsSi+uzy+0iUgHEFY7e/gQ29PSjqzeOeMrY5o1DliiioQA62powpLMVTdEIAjI1t/tIb93n1u0OFMQ8gpkHd/1Xe998LcOmr8/SHsxxhqSNdqz1yCxzx3oZk+od+RiWNlAdyDaGZSOHG3VaW8hVb8Ya1SSArRZnXhZYXhnkKjzTtdkmQdkehDENnKcX5YTYyzBCUCcShjdxHH19Md2bOKEfSfTHkkgkFaQUBkXh4IyDWJRxEoUgiGUJ4ZAgiNtaW9DZ0YZBne3o7GzDoEHtGDSoA+3tbWhubkI4HEYwGIQkSTX9sVjf94JPPnZ5NA4bNryi5JGXTIMGD8Zmmw0rSf5ff/WVOdE1sP342iVlGhg4zH/+OVfaxAp7js+f95yrL2q0Z/8Y5/GuVq/+oiR5f/Xll660sVVIAJfLG3eg8c3XX7m+hW23275C0qQxdnu3DF995W4bhWCf/fbH1qPH2NKSyQTuuO1m1zhXTyCTxkI+9eic16lX3Ai+cHnxBW42GIFLLgCZPM73LYToE/HiS29AB0Fa2emX/SW7ToF8yQXAsOKN/Pinn0H94zWAoma9TjrnREhTxiMxYSykn52aM19tyWfo//O1QErJeW1W+TjP+t17r1uM/YCN39SS5r02tK3/IFZsnIi/GnhFwqFbPZcoiM36OZdinnNuenIZ56yglEKSpIxhPP3I1kD1o1LGCw3khrPP4TU6sDZI4OpANj1oNn2l1/3OtGxksOse/RD7BRPTgFBc7y23MWYBsEetgBEGmkBTNfT1xrC+px/re/oRS6TM0ighaImGMKi9FYM72tDZ2oJgMGgrY6DgNEazmmZpnNsOVqG5xaaIfPrbjN8LjLoUXsJaY3yt
GzTqrzqRqV78EL6NOq1NOA01DFhWq/5CnGSyFLf+269y2OkpnKtxecniZa2d4W5TYaFpGlIpBclkErG42I9YhJtOoD+WFCGnE0mkFCUdapqJyYdhmSdR4UUcCspoiobR2dqEQW3NGNrZhs0GtWNIRys6WpvR1hwVexCHQggFApAlCokSQ4Xi+da9rGr8IlNF54tEIo5HH3rQlT5l2vTMm1J7dRgFS5CGUe+JRByPPPSA6/zue+zlLrfAZ/faX3izYaUhlxuoX7z+2iv44ov/2tIikQi2Hz++YkTZmtWr8fqrr7jSvb6XBrwxfMRIV1pfby8Aewg648gHvT3uUNKlMmTxS9rWA7HrF16hu6uhbx8ydDPXe/bat7sQUEpx8tzTEArZQyKvWrkSL3gYrNQTyL67gR6Uo69TVKiXXgf+9kfFFxgKQr7wdEjHu/cOzwhCTIUvaaytigTXt2nx12dxWYJ05vchnXsSEAwUX/zHS6Feel1OkpYesb/uaUwggYBM3QHST3OTwGzRcsT+fC2QVFzubX7nu07lsNe93koA5zrNVEuDc2I7/KzpONKhAQfKytw6nol1HIVMqKgDxkEYhwTn+tIAs+UDCE9+6zhJCYEsSZAlyeP+7HWUq83WslFxPaGe5kNW1EvbMr4TCrEPe63WVoMErk5kcmaxjivZzjvT8vcWNg7ikeYghAkBp8ScJojoF+KvSdhRACBgjKMvnsS6rl509ycRT6agMaGflChBKEgxuDWKoR2taIlGEJADIMQeFYPpKkA/BIP3Bc6fYn5ghBJmJLdRh5G/yhlUzgZ0flGX8HhdWgH1kg+M/M2w0ZwVXH+N+i4f/OiHvHiCRp1UFrn4uFy6v0z8T6NeqxuGAZXgQjk44ci9cVmODHP928+CqdSLKq/yvSZuRrqVuAanwlpd1QCkG7XVspsSAqLv/yRJEgglANGJb4hzsiRB5hoIkXVdDdcHUwrOxf7HmqZCU0V4a0Ey6+HRiG7pZsjoMQp7Wmd4PJ9T2eMHqqrijttuxdq139rSo01NmLHPvr7KN+XIq+TMUBQFt99yM75bu9YuUzSKXXffw1OmQjokLwK4tbWyIUILxT133oFQKFSWvH943vloaW0tS961huXLluHBf9/nSt/vgIMQiURd6QOhRNq4cQNuuuFaV4jhrUePwVZbu8MaZ8O9d92BYLna0Y9+XNXtqNVDtt7eHjFmFJm3V1/j513UqxKy3PB83y0tFZDEDkopmpqbTcMCQOwDXCoMGjwYRx93PO6583Zb+tNPPo6x48djiy2+V7Kyqg3SSYeDb+gCf8e9lYUV6j/vhBRPgM7cpegy6WH7gkzYDur19wGrv859A/R5GgAxi2p8377BhfLJoOR8Y/gQBH4yFxhZGgMQ/p+Pof3zjpwSkL12BD36IHf6NEECa3+7Nev92sJliF36L0R/cQ5IKAiuL+ac65xs3kn5ei7ls54yzuVTPuPcjECV7f5ywvCQ8pIvncbBuVXRT/Sd7Nx5ATDDGtrzKA7ZZGxg4FHq+q0k6qltVaIPKTU4ANTBcwCbRtvKNm46zzv7jVzexq57PPIzTbWIuIIgtw6MEArDmsyYgyaSKaRSAKUpUIkgHAgiGJQhEQJJpmiSKJoiQWiMI6loprMM44Csj33O+rb+zigTsb8bbkkrFJxzWDflMsZxA5TXR5ssG8pQJ4VAs1s7AoDN8/sYawAAIABJREFUWK9Rh7WFYowhG6g8Ms1762k+XI+wD3+kOALYCa9Jnt+Jn3ViUOxk0c99ngQmB0SI6fQCn1I3sa1xDjAGxSovAai+VwelYt+OgCzpv/V9p4ghH0BlioAcBA9ycxKWJqOF8kBVRUhrRVWhaCznu/GcFOa5gOjq6sLtt9yElSvcYREPOngWIpGI77wA3TuOsawy5FIebdiwXpfJvV/frMMOR0tLS0GWYV739HR3udKqgSQoBN9++03Z8taYlvuiOgfnHG+/+Qb+fe/dUFV7yMe2tnbMmLlPReRauWIFbr3pBnQ72jIhBMcef0Le+W3K7ailxYsA7i2I/HX2gE6PVEopolG7wUBjElU6WAlWA80e9VsJtLS02OTr9ZC1GOyy2+5Y8MnHWPBx2tNV0zTceevNuPCXv7GFd6s3yOfPgfrXW8A/yL6vsnbzA0Ayldtr2AfIqM0R+POFYI/Mg/bQs/ncCcCYB6V/N+AA5ybxS/J8R3T2fqDHHlw6UV57D9oN9+a8jkwdD+nM72c+P20HSD87FdpVuUjg5Yhfdj2afnUOVEfYYb9rn2zX5SKKc5dBDB2d5Vr3OspUDhtnedplz7hLGoD2b6w9zb8WTbpbiW8JdES4q+1Zn03S7+ecm2RwqcbzeiC4ahnWZkGsDvENVA3qgXQ0+0XU9nPUG/z0v9nIzkxKc+s5vwSyflJ0PyZRZ7QZp9zO3+l5pnHOMIbSmDgUJQESJwjKAQSDEkKyDEqFZ3A0JCMSlME4QzKZQiIl9JXM8X4yPZMTzCCyy9TU3eW768hqDNaAeGemmVsVdUFWUli0/XRI00Zf2UAD5YHvCFuNeUvVwjBkLikBnMuy3O89A9FgvCdw1jSDcHXfB9hltCpgGGMQW2xpUBSD3GCmlR8lANH35BCexASSxABLODEKofgIBmRwHtQXAUbeDJqmIeIjZF0+73HNmtV4/9138eorL0FJpVznd5g4CXsXQWjlM6Eyrl2zejXee+c/QibFHVZv+k47Y8+9Z5R0stbX1+dKc5IyDWzaiMdjWLJoEV6YPw///XyV67wsyzj9rLMRCoc97s6OQvs+VVWxfNmnePON1/HRB+97XnP8iSdh8y22KCj/TQnW3iTi8e339/XBakiVq8ao0zLcyKe/35YebWoqeA/BBnKjv9+rb8/PoKlciEabbL/7PcahYnHCD07Gqs9W2ojmb77+Gk8+9iiOOva4kpdXTZAvOA3K5TcCHy/Nep1212Pg3b2Qjp9VknLpkfuD7DQB2rX3gH/uf1/ntMJMKDVqdT/D0kN4YZrEbx7vhWy1Oeg5J4KM2Kx00jz7KrS7H8t94aSxkH52Wu7rpowHPX8O2D9uz3qZunAZ+i+9Hh2//wlUCGMOhvS6Jquy2COtGO+lTGUIBTgAeCkCeFYFgc3jxCPfUsNKApuLPg6bBl3IZKwRCbgemtDaCG35WH6DsYybExWKBglcAejWCq63zgHOxIlar5OGF0f1IFc/XWuopz7Lj4I71zhsXONVz173ZvSsTd9o/vaKUpUe3tKrV8a47rSSnmcS5p4PJBUFiqogBqHjCAZlBCUKKkmQCEU0EkI0EoKqcSRTKlJKCoqiQmWiLMMRxlq6+Hd5vUuzrdM1jxkGY5o5tzQMvOqjxWYHN/9ye1oNPDwX3lvQYPq+i28I/r7TBqoHufrJRj3WFvwYQTUw8OAoMQHshVyVXAh5V4oO3fteDkH8ZlbCG52QMbkSA4x1YpW2eGdELPmpxatD4wCYBs5VV96Sbs0vUQ5ZoiBU0ieCFCCAJFFIEkUgICMUcnvsvPTCPHz4wfumYoJzpENPuw3xwRhDf38
/uro2enpHGRg2fDhOnnNqQe/76SefwGuvvOz7esYY+vr60NW1MasSfNKUqQXLlA1Rj5C98Xi80YnVGbq6uvCvq/+e1z3JZAK9Pb1Yv34dNC2z5+px3z8RW48eU5BcN1z3L0j6HnKAfTJug/5Nq4qK3t4ebNiwAalkMmO+Rx1zLHbfY8+CZNqUkUgmXGnhSDSt4C0ibyf5mIjHi8itgVwIe0SvSCTc9VsJJBL2us830oYfNLe04ISTTsGN115jS3/5xfkYP2ECthu7fcnLrCYEfn4m1D9fB77IHd3ECvbEi+DdvZDPyuytmQ/IyGGQL7kA/IU3oT7wDNAX838vgbkfIPVQnm0aEJ6WjBuKUSCvnjcYgHTcISAHFu/ZbYV2z+Pgz7yS+8LR34N0/tyMJKn9N0B2nADiIxy0uuBTrPvtXzH4Dz9FpCmKhKJCVRUwllYWeylcPVc+DtInGyGcy5vHK+yj+O28xq7UTkuXlltzRA+yhrAq5dw7Y/QqT+1x2rsqrbwXym1rtCNrBCZJkgDL3lmllLuxBikOBv3giiTgVU1Zqo5zSx41XCX11qZqitB2EoG2U/VBZNTLcxjw+73k8vq1jdde5+HRHgySy7gO9vdrPZdOhy0nq6GWUGFyUCK5vhvOGRgnACVQNRXx/hQkQhGQZURCMoKyDEI4JImiORoC50EwBsRSKhLJBBRFNZ+TWcZOr/DQXs9bDLhX/47M3Xl6yz5u6m+FYPr/1VkbBgDmY2VRyjopORxzXcaYaSgqvgPDWCJtnGfC9ujmBLOc0jaQJ5xGMAbq6RvclFDqeqy3eWu5YI6DnJafAC41yhuagyDf4U0zX2Y6jepEMLXYVMExufFSkhjevSohSEIDoFgs+qGTwxIkiULT3O9h9erVWL16dV7y58J2Y8di7ulnZVVIZ6uTNau/KKk8ALD9+PGYe9oZZfGWa2nzDvmaCdXc6QwaPBjBYHn2bqWktj0VlVQKixctLGmewWAQPzh5DqbtuFPBeXy6dEkJJRKYddhszNx3/4LvF+2oPCFiq7EdWb9mL8MYY1/gYr/6Fsfe4qqqIh6Pee4b3UDx8N7PubShlgtFb49dDmfbKBUmTJyEXXffA2+98bot/e7bb8Uvf/v7uo92IV94BpS/3ZrTE5i/+i7UlAL5vJNLVjbZdzcEd50K5f6nwOe/md+9hFhnkzB2bqtv6Go7LjxTCplmkQnbQTr9WGBQR2kk0ue67Pp7wN/4IHf5Y0aBXnQmEPC71NIjAU2fAPx0LrS/3ZZdnsUrsO53f0PwF2cjEAkiHAqBShI0TUMqpUBb9QUQCICM2Mxcd1jn65mIz1whovOZ83pdb3jTOsu2ksKECGUzd5yl3BKuuURzb69wlRSW+obTU8v59aW3DspEcEh6aEnGmG1/4GpdP2wq4DpL4lkLeXSyBCS7u1mNoNEeBx4e9vl1j2rWnZQbfo2oXNc4xz2LBzB33OvyDkbm9mU4sBAuNJfCCMt1lZmJ4EHF2j2lKEgpKaGflAhCkoxwOAxKAEqB5rCMplATNM6hqgz98SSSmgpVVUUkRMu4aNPpcW6Gki7K2QcWpbf3E2WFlRg1vUt93lsLsLev7NfW0jNbjR9tJoacez6HGVFoU+qEG2igSrApzwcGEmlDGF4bBHC17cdgUxbAZbQJZpmbpSdqhiWSxQJcV2pZJydOBYJpJQcKTWNQVGHVlPQI01xKyLKMvWfui1mHzYYkSW7FkeMfA/HhyrKMAw46BPsdcCAkSSpLh9HqoXjPRRJUq0XSKXNPw+gx21RajE0CI0aOxIknz8GoUVtVWhQTLa2tOO74EzB56rSi8jl5zmkYPaYwj+ZaR19PZgLYLzKNXpkIyQYBXB54kap9fZUngDnnLjnybWP54KhjjsOyT5di/bp1ZlpXVxfuv/duzD39zLKVWxUIyMIT+B+3g7/zSdZL+dsfQV3XBemnc0HaW0pSPI+GIc89Gnz/3aHd9AD48s/zzsPUUXBjH9zqmG+UBlz/HzE9WApifrfeAvLRBwETx7pLKGLeSFIK1KtuAXJ4kQMA2WFbEfbZY7uWXCGXAYBOn+ibBE5ddj2UC89AIqVCphTSohWI/+susI1in3kybDAi550CMmZLEB97BvsNEZ3Rc9ZxXzYvvEznOGci+pFTNts9oi5pGb4BL2La6zntazYAcJPA1t+SJIHo3sAsw9o2nzbaIJKLg2GwbcDK8RfyRkXIb70+arxKasp7tgaRr26LMT2qnCWcbi2jXpS+hT6H0/DIsz04yV6S9mC0ji/E8teZt+lI4pAZcBPHmWSx55e+U6QLL2JVYVCVFGJJBbJEEQwGEJRlSJRApkAgJCMclKBxAkVVEU8qSKZS0GB/f0aXbDUQzxipYQDh0oFaflOHHreawayx5apb1JKDmQRIGoSL/7NuK+OcedZLX1VvyDWGNuqsduAnSlYmNOo5f9QEAZzJWq7SYXkJAEn/h8qZJTU9QbOCOQeQDP0WBRXKPW6ZXDE1TQhnu7lIyLKMXXbdBccceyzaWjugqio0TRN7jDEGTiW74oOUfzoWCASw0y67Yp/99seQIUNt5wrt/DOFYvNSvK/7bm0e0vpb1DU6q/rAkKFDceDBs7DTzrtUzf6tLa2t2HvGTOy598y69+grN9Z+960rraWlNORci1dfs/Y7DB1auj0qG0ijpcVN4q1b+10FJLGja+NGU7FnwEvWUiEUDuOkOafhH3+93DZWffDeu5gwaTKmTd+xbGVXC+Tz50C94T7wV9/Neh1f8TnUX10B+YJTQbYZVbLyychhkH/3Y/D3F0G7/2nwL78pIJP03Kumw49y3SrVILOL4U2+NxzSsYeATB4HY18wr/WDv3DMDinWbYD6t9uAz9fkFINMnwD5p6f6FjvTfJBOnwhy/lyo/7gt6/188QrwK28G/c25UOe/gcQtD9rPf7MO8YuvRtMvzwEfnzYKzJdkzHRPZhI3G0HsXCcxx/U5FOKW/GzKvBLOrY28JL2NMsc5L6LXkN347TzPOTe9nsA5NE0ricz1oqCsCKFtcRMqRanGfsCGFrke6qUeUGljCYOks/72i0whgmu9bVW6TkqFUjyH1ziZjgKY23CK6K68mUJMe5blNcby9JYjRlk2L0rd0Mm43eqoYkTDSKkakooKiVBIsoRQKIiQTEEogUwYpABFJBiBxsNQFQV9cQUpVYVqbqOQfibnnKHSYB7vXuXp7R8MKanj1Va6jefj7Vuv8Jy/mp+C5f14GFw0UHvItTZxolHP1QWrbsz6LdbLeqeSSBPAJIsFWo2hEh+2CBeWHkiKKV9MLvQ9hvVnoYCpKOOEeNK/0WgUgYDb28AAB1yKkpaWFrR3dGDIkCGYPGkSpkyZjHA4DM71PRRISG8XwiKfMQZV06CqDIqqQWUizanANhCJRBAI+A8fq2kq+vv7bWmDBg/GscefUBDBlonotXpXW+EkmAFg6eLFOPzIo/MuO5dcVlkaGFhQStGcJ8kSCATQ2tqGtrY2fG/LUZgwcSKGjxhZUrmaW1r8tXO9+SSTSSQd+9ROmD
gJBx48q6Rybar4dIk7JPewESNKkvdmw4a70pYuWYRxO+xQkvwbsGPoZm5iffmy7KGABwJeYd+HlNkIYPSYMdj3gAMx/7lnbekP3Hs3th49Gh0dnWUtvxogn/V9aJEw2HOvZb+wtw/qH66GdMKhoLNmllQGMm085Gnjwd79BNoDzwBf5WdsZubjRYoZSjrnxK+CMAlrIL0vm0H8FoMtR4AedRDIlHEZ51P5zLNc7/O9BVCuvxcknnvPcLLHjpDPOcF3WTnz22kiZB8kMBYth/rbq4CV3tuu8JSC/kuvQ9vvzgcdPwaKokDT0h5lflHIwjubcjydnyEDz1qGV16aw6tD5FZiIljn9KyqcC8PX/FvtzGBU27huQwQWRbGtSVYE9SLUmRASSHvpWtRMOVmcLsS1SjqpW0BlXkWJ/lbLOpNh1Av7atUz2HkkR4V7fkbYwoTg1K6mzHIYofuK1eUDme6QxgxolmJYP3f4n6jv06PfebYSABV06D0x9DLGIKBAAIBglAojIAk5A7KAXS0BsE4h6ZxxJNJJJMpqBoD5wycFhcGutTIJYt164g0dS3eE+UD+71y86h93f5AwpwWcMM4NQ3reqVOhvdNAg3St7ZhjH3ZIlA14B8uD2C/JHA1TNbyrXxhoZa+1usuZ1Z+yuCci1AlRJC3HMbeFQUODbrizloiE77GMCKvMA/pTzjxJOy8y67pLKxZGhZ6XORh29eCcBBOQSmQUjnUWBKUUsiSmHRRffJFCYUkSQgEAmYYNsY4GAc0piEcchO9J805FRMmTBJ6yAzv0ho+TVFU/PHi32HD+vXm+W++/hpvvv4a9tx7BvTtlB1h4LzbY7Z2bCWGrfe1tbdjiy2+h9WWvYvXrFmN7q4utLa1ecpfLLLJWelvrF4xaPBg/O7iP1ZaDBf+8P/+hGDI/77N33z9Ff58ycU2A4y333wDe8/YByNGlpac3tSgqiqWL1vmSt9+3HgAxcdg2G7s9qCU2upu6ZLFRebaQCa0t3dg+IgR+Pqrr8y0L/77X8RisYp6ynsRwNuPL78RwCGHzsbSRYuwZs1qMy0Wi+HuO27Hj87/6SYx9kgnHwHS0QrtvqdyXqvd+yTYohUInHcyeDRcUjnojhNBd5wI/tp7UB99Hvh2fe6bssBWd8Tw0oA5fwJ0A/cyVbGN6OW6YpADnFjn3yVQkm63FaRDZoJMc38vfjxmMp23Qr3tIfD5b/iSluyzG+TTjvFxZX7wTQJnIH8N8JSCjb/7G1p/ex6iUyeAMwZVVaFoqvBa1IlgYXxqFO6eX3NbxKPcSuRcnmvudKI7YDDPa50W4KY8xIielF5/ldK71th721hLiQLs14ky055R9vT0b2tYTiJJ4IxBM4hgUvjXUctKEWfT4Zb+qrQFWfIvQ/ai/eptl0Hfw5yUrb8dKNRy23JiQHRYVh1FkVnlO57VIuqlfZWybVk9dQ3C1UoCE4g+0mpgYETFyJf0zWg8l77AMrcTY6x1LMt0p7GdA5UkqKqGFAPiyX7IkoRgQEZAlhCQCCRCIMkEQTkC3hRBSmFIKikkUwoUTdMjKxCbUFZCGtANv5znygRre/WcU9r+zXXdLQBuNX4szVw4Y5nWtNr+rAYWGbgAIP1eCQBmen1bVj0DTPI3kAOOubrZTxpdib4WLvW32EDx8PMdNTiUwuDJUGYazJzXDDTMCY/FgjofOTgXxCyDGIQ1j4Nx42Bg3LZTQla5DFBdSSD5f6y8QCyHE4LYTVud2Q6u363fbH+XFJwAGk+HbYknU+jti6G3tx89Pf3o7Y2htz+ORCKBVEoB0zRhdajv7REOyJ4EcGtzE6LREIKyhAAluqeBmDAaYaXFgwmFTyAQwOwjjnLl89STjyMWi4Fxi5LEeCeEmHviFNI+jAmscXh54C1dsthmdel1lAPFlCG8tu33euVXD17/1Yp86y/fuhg2fAR222NPWxpjDI889ID5uzEAFoblyz51eVcPGz4cnZ2l8Y5samrCqK22tqV98/XXtr1ZGygtxjmIVcYYlnkQsAMFxpiLAI5EIhi11VZlL1uWZZx86umQZbsd4LKlS/DKSy+UvfxqAT10H8hnHu/rWv7JUqR+dTmw+uuyyEL2nI7Alb+G/MuzQaaOL12+xhBA0nPC9CSS6//BOllM32tc6kGWmEpHrudizClgWXPrBo0lY3KCAUG0XnoR5N/+2JP89YJzfpjz/LfroPzmSvD5b/jKXzpxdlnIX1M+nQQuOh9VQ+8l1+C7199BfzwOSZbRFI0iFA6BUiIi/3BubRUuw2Bjbpn+nX2O43Uu0z2Z02DKku1a/SrznOFdW8xc3VxTQCiYJUIh6Ws96zXpvTnt7czZ7ly/IbywjaMUX0qtzusF6UuRJg+KzTB9cM4FiaD/Lt/M2Bq2lYAzUW6NVokL5VzzDiTK9RwD8X6sfUi91Ec9odT1IZwwxGHTOXIuIgUapC9gC99svd9r/mNNzzQ/8r6PmmVlnlOJg5qGZRygaeMyRVXRH0+gq7cfG7r7EIsnoWrcNMwJygStTREMaW/B0M52tDRFEZAkPeIiBzi1lCXk0DgTet4B/gb9whiOmO4oJA6mH/Z5V77gnIt89cM6D2+g9OCAWYcaZ2bbA4rT3TZQYniQJlZ+xPwWWfpb5Ma32Ki/mkbjO8yMuo9eYBKiPiueOQ4nieanEVkt8yoF53OonEHlDBrPPrWwTWaoBE4pNBAojEPRNPQlUuiNJdDVF0N3XwzdvTH0x+Ji4qaqrvxkmaK1OYLO9lYM6uzAkI5WDGprRUdLE5qbowiFAqCUmFb7ADB12nQXMdLf14dnn37SU+ZMHgeFEl/jxk9wpb30Ylohninfgehk8snbKabXvZmI4Uan6Yaf95PrvZXynRptfNZhhyMSidjOLV2yGIsWfOIyhmiQwf7xwrznXWlOArFYjN/B3ddsSuTbQGPsODep9torLw+8IDo+/OB99PX22tK2237cgO0nPnzECBx6+JGu9CcefQTffP2Vxx31CbL3TpB/fIq/i9d3QfnVFWCPzCufPDtsC/mC0xC4+negR+4PtJdm3/EMpen/AaY7iQVWwtg5ahkeKUYY53KOL2TkZpDmHIXANRdDPu0YkM2Hla0s/so7UH51OfDfL31dL/3wB6CHzCibPAZKRQJDUaFdeTOSHy/Fxp5ebOjqRjKlIBQKobk5ioAsQQIHgT3klikHIcgU4ShfIjhTuvhNLQcB5yRDHobXsL9ySzn/Ip5ZpYlz5z3O+62QKIVEqacCvxDUy9y9qMewaPoIN3u6AUdjLbVpoBJ1XG9r9Xp6jnI8i0kEZ7kml7FbPjCeIafxXFZ53OVb9RIcQF8ihQ09vdjY04+evgQUVQPXnURkArRGQxjS0YLBHa3oaG5GOChB4gBhlogaknBoQY3oO6yOOwxc6Gjz1L/V07df6+AQW5JYjwaqB8JIwtswghNYHAT1b9Hwos+h122gNtDoK9PIqmWsZsIgY8gNzsFAwECgCfUFOAgYoWCE5uGFIJQOXL9P0/MzDpXDdmgcYNxQUAg7cQpS0n2o8
oXdCyN/Akh8JNb7hNLHcBIxLJ8UlSOZUpFIKK48FEWDqmjQNBF0mlKKYDCAcDiEtqYoBrW3YmhnB4Z0tmNQWyuikTDCoSCO/757D7XXXnnZ9I7LRrpZj0La8JajRqG9vd2W9uWa1VjwyceenYdXGeUkUT2fNcMkI5dlZ1FllvAoJ0T+jjIyGHYUKmOpSV0/h4GWlhYccPAhrrweeehBaJqWtYwGvLHqs888Q/NOmTa9pFa1EydPdqW98fpr6OnuLlEJDVgxesw2aGpqsqUtX/YpVn22siLyzH/uGVfaDhMmDqgMM/fdD9tsu50tTVEU3H7LzZ5GXfUKsvMkBP58ITDYn4e/9tCzUH51JfBFGYnyjlZIRx+EwD9/B/mis0B3mwZ4RFqpWzRFQWfsjMCvz4V82S9A998DiPjfIiFvJJJQ/3kH1BvvA1Lu+awnthgOuuPAfbNkp4mQfjK3+HxUDeoVNwJLVoKDIJFMoaurG319/QABQpEwQuGg6cUDuOfd2eDHEC7bPZnnYPrazLbecuShK94Yge3QDM+YEsw/jfWFRClkQkF4Ok1smyNUu4RYFOfcbSjh5R0sSRJkSSqYCObcftQCDDkzrev9PUs6RlY1Pbu1jhlrKKCqDSVZv3G7F2YlVlf11K4az5I7v2x6AacOLJfnr9c5L1jPWYlfAoeXMjwixpjl2Z5G6G257lVMKFTGkFAUdPfHsaEnht5YEklFg8YYCDgCEkFTWMag1iiGDGpHR2szosEAJAgymHLhGV2rMAko/VC5dxQTzWOe08DAwWvc8Eoz6lDlrEEKVwBcn/drXN8zPY8BWuPcVm8aRFQhZolE2tCl1h4GmoeoRvhyM6lmssA54QHgUtAPRLUaYQQ0h3JBLAoGQAAPiLB93h6KfmG+U8t9zvZghBsBdbeRRFJBb78I8dLT04vevhhi8SQUlYlXw0RtSZQgFA6grSmCjpYmTJsyCXvsuYctL1VV8dgjD5mT2lIQc14TYEop9jvwINe1zzz1pG9lVrYJ+UB1Opy7Fx5+wv1UArVKLhvI9E79ELilwIyZ+2LQ4MG2tG+//Qavv/pKQXJvyuCc48nHH3Wljxu/A7YctVVJy9ps2HBMnjrNlqakUnhxvtv7uIHiIcsyZuy7nyt93rNuIrbcWLjgE3y5Zo0traOzE1On7zigchBCcNKcU11RBL5csxrPPPnEgMpScWwxHIE/Xwgycay/61d/BeXXV0J7oPzth0zcDtK5JyJw7cWQzz0JZPL2ZS+zEiAjNxNhuX9/PgLXXwLpjOOBcWPKXi577xMov7gM/O2P8rtx9ddQ/3KDf8K4BKA7TgSdvW/xGSkq1CtuBF+yCoRQMA6kFA19sQQ2dPUgHk8hFAqiKRpFICClQzny/PY49EP45pOPW+mW9g62whr+2bzWI69C5ole8zxr5Ib0nMq6bqKm4slrrujM3wgJXahsftdJlYaQL/s6zVddpfdwEp7ZrDqf22ttVouohbaVD3w/i9PCooreAXNskVXLqMXn4JmOEnwrmXRNzt/FELuZ9BfZys1WlpMQJkj394ZxlDCYAtxjgPAMjidT6O7tx8bufmzsjSOlCEcfgEAmHE3hADpbmzGkow1D29sQkiVIlAJV+C0UKo/Tq9RKCjdQGeT7jQHpb9gghK3k4qZMRpUSVoNPYfQJM4qVgUxOU0541afzO/Qy0GjUYe3AyXNtKnVnW1nW6oO7OuAKPwIj9vDLFUWGCZ2/W92Wfrmuc58UYRUIFV7YKY0hlkyhtz+Grt5+dPfF0NMfR38ihf5ECilVEWQyITjxxB8gGLR7u3z80YfY8N036GhtRks0glAgAEopuMdkyE9btn7w1snubrvviQ7HXp+rv/gvXvBJzHjl6+da5zMsWbwI9993DzZu3OCrXD/5O9Eg//whHzJ3oN9pIBDA4Uce7Up/Wt872y8yBXGq1bGhEDz37NNY9ulSV/rBhx5WlvIOOfQCe4sHAAAgAElEQVQwV1t5+cUX8NnKgfVKnffsM3hh3vNIJZMDWu5AY68ZMxGNRm1pCxd8gvffe3fAZIjFYnjw3/e60g846BDXnrwDgY7OThx93Pdd6S/Mew4rV6wYcHkqikgI8kVnQjpulu9b2GPzofz8UvBVa3JfXCyCAZDdpkC+8AwEbvwT5J+dCnrAHiAjNyt/2eVAQAYZvw2kkw6HfNVvIF/2C0jfPxRkzKiBKf+b76Beeh20v90GrO8qKAu+dOWAksBs3utgj+feKoBMnwCyXQ6jpZQC9fLrwJesMPew5VwYQqYUFV3dveju6QNjHOFwCNFICDIlICy9urEquoHMJES2eYTX/D3bvNWdzmGEifYy77deaw3z5vS2KRSEiIhPEggo19ehptYpbSKcjfj1JIEJgSzpCu085THAufA8rUbkO610k/+oksV2GplIINs1HiRwrc6xa1XuegXnwkupHuBt8FNd7S3TN+68plTwo1swxvJs+gm/RHEu0tlMzyGzlRR2lwkYb9EghI12TPTogwwEisrQ1deP9V292NgbQ39CRUoV8lECBGSKwR1tggzubEd7SwtCsmwaBnEOMzKkQcybz6n/Z+gtOfwTRZXEpkZc1BuMurMSlkz/20D+KPe3YO0fzbqzHo5IQ2nBMhwNVB3MujOMSuuib9U1/FT0L7YVZS7SoppJIsNamhiWZpa6qojUxHFUCNVRY+mXYLYxIiZfjAOqxpBMKUgkUujti6Orpw89PX0Ih6M46KCDXbnddeedCAUktDRH0NnejKGdbRjS2Y621iY0RcMIBmRIVEwyuZjtZYS1g7ZOomRZxoEHucPqPvnYo1i5YnleT5/JajPbBJxzjq+/+gq33nQDXnvlZVz8f/+L+++7B11dG/MqO5s8Trk2KW9Qkl+45Vp4H1OnTcfoMXZPqf7+/ox7Z+eDWnkHhcA6D1uyeBGefuJx1zU7TJhYcu9fA8OGj3B5fTLGcNtN17v2hy0X3nvnP3jisUfw2MMP4ve//TVenD+vbongSCTq6QX877vvxHdr1w6IDPfedQc2rF9vS+vo7MTOu+42IOV7YadddsWkKVNtaYwx3HX7LUgmEhWSqnKgs/eB/KsfAk2R3BcDwNffQf3tVdDufBSIF/e+fI9L0TDo9AmQ5xyFwF9+geC1F0P+0cmgB+wBbDmyKBnKhkHtILtOgTTnKMiXXIDArX+B/Ksfgh60N8iQQQMmBokloN3+MJQL/wy+cFnR+fGlK6Fedl3ZSWA273Votz+c8zoydQfIPz0V8kVng2zrhwS+AfzTz8S9RMzZjb8aY+iPJbCxqxuxeBLBYBDRpigCAVl4qHKhTjUMMSVJAlCY56/VcNLPPWkv3/Qa3fjtXAGZeehKXrEWsZ9zhnfzC2sYTHMdalFqi/S0wjmTYa5zzmkqzSmFbHmvHiUDHgtOa37VG34490LZ6Y2mO5FlNfge6DmrjfPPXi3iem7/Rmp5jl2d7Sp/eH37Zh1ZO5kaQD2RQk5dRbUgn7frJBzzRb6Ebj46DK/rrGOPNRKFtb+yEcU55LHm67WHsVN2SsVh
9J/EPESCqjH0x4TOsKunH/3xFFIqAzdDRVM9VHSz0BE2NyESkCGBgzANhDFQQs3vhIKCcArOOAjjIJyXfM/4crfdhhdi7YOD61ssMmialnU+uqnXszHnMiKRMr+evSX6rtOcBmzzPKtMGmfQONOd29LXmQYnDVQU2fpkRgCN6MYZnIFxJr7NGq07wjmMx807tlS1EyJpC7PKewI3kB1enregEjgoVA4kVQ37HHAQ2trabPd99tlnePGlV6BqIow0IYAsUURDIbQ2RdHZ1oKhgzowdHAHBne0IRBwe1QxXVFlTG69BtGdd90NI0dubr+PMdx2842m8j7XBDvXoJ3JA7mvrw83XHsN4vE4ABH++rVXXsbVf7sKmqaVdMDPNFmsdhI0P/K2umQvF4485jhX2qsvv1QUsWUsuKqt/kuNFcuX4babb3R9B5FIxPO9lhKHHDoboVDYltbV1YXbb7kJqVSqrGWv+uwz3HPn7ebvvt5ePPrQA3jogX+XtdxKYu+Z+6C9o8OWlkgkcOtNNyCRiJe17JdemIePP/zAlX7o7CMq4v1rxfEnnoTWVvt4u37durpuC9lAxo9B4M8/z4tMZc+9BuVnfwKf90beRkbF9bMEaG0B2XUK5DlHI/inCxG8+yoEfnc+pJMOB529L8jkcUBne4H554mmKMhWW4DsNBF01kzIP56DwNW/R+Dv/ydI6v33ABm1ec5sygH20ttIXfBHsHmv+7+pKZrzEv7pqrKSwOz51/yTvxecJn6EgpB/4ZMEvux6kwR25UmEF04imcKGrm5s6O4BUzQ0hUNobo5Akqi5C4wXgZsJmeaf2RSZ2UgzkZ7WsqTDQ/ssF2lFfaGK1Ezfs0ECO6+z/vbKx/pbliQbwe6kIHLJapCOlUYhXJqtThjTtxCqLMz2UqDBt5MErmXUw3MY35tB+Nby89RDfVhR6WcphfOWcHzwVy+lel4nkZtrrumXaM6Uf7Y8nfeZIaKt5TlebjZ5CaUAIUhpDH1xsd3c+u4+9MeTSKkauMZACBCUOJojAQxub8HQjlYMaW9DczQCCkH2imKFQpGA6n9JzXvoWQ3aKv39NJA/OCVmOGPVIBIt9bkp6OYygfM06VttDrWcCxLfKheDO/S3da3RQHXB8Mo3AgxxIO2dbxgM66SwaVhc5bAaocnW1HqwQgUAQtIdY/VXx6aLbO3MaIvBUAiHHXEU7rr9Vtv5+/59H7afMAmRcFifdBLIlEDSlSOUiHYQkAmCAcmVf3M0AlkOQNFUcE0DBcBJen8zAJAkCaeddTauuPRPJhELAN3d3fjr5ZfhnPN+jC22+F6xr8H1zF1dG3HLjddj3brvbOcppTjhpJNt+7CVGl55eimoSl12rfc51YJRo7bCjjvtjHff+Y+ZpqoqHn34QZx5zrlF51+u+q803nrzddx/z93QNM117uRTT8eQoUPLWv6QoUPxg1Pm4JYbr7elf7p0Ca75x1U464fnoampqeTlrvrsM9x43TVQVdWW3tbWjsOOOKrk5VULIpEoTj3jLPzjr1fY6nzN6i/w9yuvwA/POx+tDsOjUuC5Z57CU48/5krfZdfdsePOu5S8vHzR3NyME04+Bddfc7Ut/e0333AZKGwy6GxD4I8XgD3+AtSHnwNR3X2EC339UG9/GHjuVQROnA0ybYcyCOZvzCTbbgW6zSjzt0nOfbUWfP1G8HUbgY3d4H0x8FgCiMXBEwmQWAI8ngBiCaC3T9zc0gw0hQW5G40A0QhIUxhoagJpbwUZ3AEydBAwdBAQCnoLVEHwJSuh3fYg+Jff5nUf3W0apDlHgX+4EOp17tDttjI+XQX10uuE97iH8WGhYPNeh3bHIzmvI1PHp8lfA6Eg5F+eA/XS68CXrcp8s04Cy784xxU6mpD0PoCESiCEIp5MIp5KQpIkfa/gkIjok0zlRQIbyPee7CQwXOdyXU8IEUQeYKyU0+l5ro8JIcLCmgsrciNLQtzzd6/8M82xzHxthqsG6e0PhuFsrcDt8Sz+GOSp4XldETiKZSjAsh71o38B8v+OqwkE6boQ32ttPocVtVwfTjSepXj4dVbINBbl6qus45lvmYz8eJrE8crBOZ6LkY+YhupCaU/Rn1QRS6mQKIVECSIBCZIsQ5YASaKQKCAHImiOhqFoDMlUCsmkAoVpZp5Mj+BRai/gSsLL0aOB2gEzvj0jgXNQRwut1zq1bjpT7RrIfAxgC723gfLAGFdytTF7iHYxcFHLmF7t9WhqJwr5nKyTg2pavJhWMSAgHEXt61RuVGtYm0rCOSnedbfd8fKL87Fm9WozrWvjRsx77hnMOuxwaPpeNyJgadqrV1jKU8+9r1qaoghHIsJ6Q+NIpRSoqgqVC4s5w8t28OAhOGnOqbjxun/Z7u/p6cbfr7wcp55xFsaN91buOi2z/EyIP/n4I9xz1x2I9fe7zs06bDZGjx7jcRfKavbk1UazLSAa7biyOOyII/HxRx/aPEc//uhDLF+2DNtsu21Jysi3XVcLnJJ2d3fh6Scex1tveHuCHTzrMOwwYWL5BQMweeo07HvAgXjh+eds6atWrsRVl1+Gs889r2RENGMM859/Fk8/8bhrrzBZlnHKaaejubm5JGVVK7baejSOPPpYPHj/fbb0L9esxlVXXIZzzjsfm202rCRlaZqGhx/4N1575WXXuREjR+KY759QknJKgfE7TMDue+6FN1571ZaeTNZHGOhCxyfp8P1Ad50C9bp7wD/NQqJZ8c06KH+9BWSbUQicevSAhGXO9Hyu8XnEUJARQ+tItZUZ/PM10B56FvzDxfnd2NIM+ewTQCZvDwAge+wISWXQbsruEc+XrYJ6xY2QLzyzJCQwe/YVaHe5DUecIFPGQb7gdO+TwQDkX5wtvHxzksDXCa/h7ba2neKcmwaIjDGAEBGuTtWgqHHE4gmEgzKi4ZDwykmloKqaUKHmqRDJRuo6z/s5Z40Ek748HQ7LnU9aqaZypq8pIfb39duHcLs8XA+TLZTd1vKoS9mejRQ2PKeMtbfhEZLLG8R6jjFWMe8RqxeyH0NgL6JEkHNpZQ0lpOxRvziQkWc3vYDNC/MBAVBdepRC4Gy7tfgcXmRXLT6HFdZvvdafpdrAxKebt9GHH93fQNZVPuNpJu9g67156Yi4XQdt6pkc9zn7X0EY21X1Rt9D9XOapkFTgVRKBaUpSLKEYEBCQBI6QkKAkEwRlsPg0Qg0jSGRSiGlaEipGjTGTNlq8dvJJXODgKoteNWNM+yxMU/Ndk8twuVjSfIOuFJ1YLD2p/rTefSTUs0/afXCNNA1+kLrGFNQXvZ6tK4Zq+lbNOcsVvuRfMNeGYRbtTyY9WMihEMmAOFWJ+7qgXVC3ggDIGDzOIXwxD3muO+7rnv+2WfQ3dVlW9gI63qx11VKURGLJ5FS3B47qiasNSRCIUsUTdEw2lqb0dnahMHtLRjS0YrO9mZEIiFMmjwFBx1yqCuPZDKJ6665GrfdchPWr1snEs29Qyz1yRyhOjwWYslkAg/efx9uuv5aT/J3z732xv4H2vdDti3oPD49juL
2ArG/V/97yDRQWXR2DsI+++3vSn/ogftcZF8pkE8bqQS45TDQ39+Pxx99GBf/3/9mJH+n77QzDprl/u7LicMOPxLbbT/Olb72229w6SV/wFOPP1b0fqwb1q/HNX+/Ck8+9qirPRBCcNLc07DNttsVVUatYK+Z+7j2XwZE2OO//PH/leR9L128CJdecrEn+RuJRHDaWecgGBw4b0k//foRRx2DIUPK6/VeDLyewe9RVLlDByHwfz+GfMZxQCTk+z6+/HOkfn2l8B79dp3f0nIc+aFa++dygn++Bupfb4b6v3/Nm/wle++E4JW/NslfA3TGzpDPcs9HXWUvWg71ihsBRc15bTawZ171T/7+zxnZL8o3HPSSlbZka3hHkxBF+tviHIgnVWzs7kdPbz+oFEB7azMiwQAIZ+BczMXFeMwBGGkMxEL2cY9/O5Ftbus3DXqI6DTsPgaMiAV9msBNh+Kz5pdJDitpDONdOc8TAphBxnRvJlBPQtf6m0CQnhJNK7LFHsx+ZaP683u8lgFAus24CTenzJ6Eg0c+pXwUay9rkrtZuk+jnmnBQogCeDaWucrhZfRczToNs0qFRQIyfQzV/Ax+UG86ppI+A09/t9bDum7M5g1ELeQvI06vIAHbrM3RzvIdx0oNrzmyNVR0rusNGEZI1hDTme51n7Dka+7fx133uYyAOAcoAad2mSSqb7VgjN0EIFREhFRVDf2xJLp641jX1Ye+uApNA0DEFhYBmaAlGsagtmYM62zF0LZmNAdk4S3FOMDtVD/hMENI25F7DlOtsLbJQmSvteetNwgv+PRhhBsuPMPSyeYEyda5WmD1fC58FVx9yGUAaRxGHWqWdUkDpUG5+2gjhLv1qGQfabS5ym44N4CghCBtx1XdaITocGPs9uMwYdJkLPj4IzMtlUrh0UcewtzTMiu9Mr27nt4+qKoGSqnYu4wSyJIMSZZAKUApQUiSEQzIYIxjzik/QHtbK+679x5XXh+89y4WfPwR9tx7JqbvtJO+bzA3TdS5zs96WUevXv0F3nz9Nbz3zjsZPax23nU3HO1BgNvaSRkadkGWnA1UBfY/4CC8+frr6OnpNtPWrF6Nd95+C7vstntZy/ZS6lUCTgm+W7sWixctwKKFC7Bi2TJX2GMrDj70MBw867DyCugBSinOPOdc3H37rfjwg/dt5xRFwXPPPIW33ngd+x14ICZPnYb29o4MOdnBGMPiRQvx5uuvYfHCBZ6GAJRSHH/iDzB12vSSPEut4OS5pyEQCOI/b71hSzfe93/eehP7HXAgJk2dirY2f/unqqqKFcs+xauvvIyFn3zseU17ezvOOe8nGDp0s6KfIR/4+TZD4TBOmnsq/n7l5WUxGvGLah1f6MxdEJw2HuqtD4G984nv+9hr7yL12rugO06ANHs/kK23KKOU2T3o6hqrv4b6wNPgHyzK/972FshnnQAycWzGaRXZayfIANQb7stwhQBftBzK5Tcg8POzCvIEZs++Au3u3OQvJo3NTf4aCAUhX3QW1EuvB1/xeebrUgqUy65D4BfngGw/2l/eEBbtlBComoae3l709QHBYBDhSASUAqmUAkVRARBwnULmDGBE/6WHNja81oDc3lJe5/2miXQjza7czTbP9eNBm4kU9lrj2a3P7WEn/cypZFl27Pfn1+gjfe3AgcNQ33nVTeG5GkrC4p+FO/4OBIx3INpW9Y5/9YB821u9jKH1snYv2XNkIG0LyipDk8rV1nJ51VYKfvpmp2dT5jE2e7r1HRDi3L6Pmx2x0yCKcZ6zt3e+R6usiUQCyQQgSwSUSgjJEuSADFkS+wAHAgG0tQXQwjg0piGZVJBQFKiaBpUxgFAQKoFyaysSYyon3FZWpeuzUPjxVreiVp+znsE5h2rOjwDodeTbszQ9ZSutXJnSjbkQKV/ZtQZrXwkA3NoX6i+okO1HNiUYvXSl2ECj7jTxA4BhzEAGfCubnBqJYgatahrwCCGgHA4SuDpkA7Iv8PMdfOsVRx97HBYvXGDbs/HtN9/AzH33w5Zbjsr8nrwMDiUJTFdSEV3BTUjK9GaQZcm0rKeUgBKKo448AiOGD8M///lPW3hdQJAFL85/Hi/Ofx6DBw/BxEmTsfkW30NrWxva2lrR1NwCVVUQ709g7dpv8OWXq7FwwQKsWf1F1meeddjhOPDgQ7JeY1g4OjHv2Wfwdmtr1nuLxeQpUzFh4qSyltFA/giFw5h9xJG4647bbOlPPPYopkydhlDY/36ezz/7DJpbvL1kC4bQOJo/J02eklc7+ve9d4NK7r2909lzJJNJ9PR0o6+nF729PUj48OQMBIM46ZS5mFJBEjQYDGLuGWeh89GHXeGgARF+/uEH7sfDD9yPUVtthQkTJ2PI0KFoa+9Aa2srItEoEokE+vv78M1XX2H1F1/gow/eQ1dXV8Yyw+Ew5p5+FsbtUI69SqsbkiThB6fMQeegTjzz5BOu811dG/Hg/ffhwfvvw1ajR4v3PWQIWtva0N7egUAggJ6eHvR0d6GrqwsrVyzHgo8/QiwWy1jmiJEjcfaPfoyOjs5yPlpR2Grr0dj/wIPx3DNPFZxHXc9XWlsg/2Qu2EeLod78ALChO/c9Oti7C8DeXQCy/RjIh+8HMqE0ofmBTTukG/9sNbRHny+M+AVA9tsd8vGH+vLuJnvtBJkQqNdn3xMYi1eIcND/cwYQDPiWhT3zqm/yN/Dzs3znCwAIhyD/8myov/tb1v2QiaoJEviX54CM9UkCEwLOhMaGEOE0k0gqSCRTCMgywuEQmiJhaKoGRWNgnNlCRHMulL1ip9vsXlF+jROzpVnT00mZQ0ObeehKaus5apxykLdGntY0b0NfDos/GaB7AttltCuVrecMD22DQBcepbmIElHuQBFDTg/XUhsL6qqxvEjgjBIMcLfpZZQAVI8epVBUBelo9bqsnBRVAcPjxRrRoZr0dfmgFHKXqj1YpeA+iEmbDJzbnqXSdZGP0ZUf5Bp7nWOivUjDi8LtpE8dcWDzNRYzIkcoDAATYZ9JIgVZpggHg5Alqu8jDEhUQlCW0IwwVJUhpapIKMKgjXFukgum17FHmbWOTM9S6fbagH9wwPyQGDy+RWfPVc6qJXYyzmhetmlio2l5whbpxzaKEVv/WU97mBcKI3x4NfbEYh3JdNvfdN2RPLZOKgS+TNL9TrAyLUqrBYQQSCAW9+vqkw/IPlmo1Ul6KTBs2HDsNWMmXnphvi39wX/fi/+56Fe2tFxt0DxPrfYyBFxX6iQUBsI1gCjCih6ATAmmTp+O3/7fb3H1P67G2rVrPfNet+47vPjCvIKe0UBbWxuOP/GkosjVxYsWFiWDHwwZMrRBAFcpdtplV7z80gu2vbO7u7swf95zmHXY4b7zWbRwQTnEs2HIkCF5taN33/lPyWXYYcJEzD7yKAwbPqLkeecLQggOP/JoDB06FI88+EBG8vrzVavw+apVRZW15aitcNKcudhs2PCi8ql1HDzrMAwaNBgP3X8f4vG45zWrVq7EqpUrPc/5xYRJk3HSnLmIRKJF5TMQOPCQWV
KwSwgx/BrgpxUWCMExcG87JTMWAZAgSIDQJhgYQQxAQwcVwVhCnKCRISL4mHwDZI2LpXIAkhEKAr3Yd07++cvffsme780dMzPbN7nntmz+o+/am693f27Nkza3X3dK/p1Wu1S5HA+wTJ//j1jV/Hf+9/AP+sF5xRoIANW9SF/pdzhsvLNdarNdLkgJvdFjyKIDvyzp7LEdxF3QlcjzJR5wQHcD9oTLhNwehI4B4O4MkifXvSNwjNBXxoW0DQgyq+6dMXKnpbU+ISkW0IVT2Ox0U+oTlR9+30cQC3nd9GvS66rjkmSncqjuw44+++43tf2dvOaopwH3J+0zyTjvTVMhw5vwFj92FzDq48px6Uo+wFldFRO421A5mjTHHOWdUBjMayYpDG11oXIYSSWZQ2eJGOWqjtAoUQFRu9OD/XjuWL3My00NT6i0UdwBqhSgkyz+Wt3PnHjWlJqE6CU5ULoC0bQF8+yvRZwd73mZ3y2aZap1oqqvIB08s2Vb32kas0FAA1+HKIo1VoDFIKRJxjHceI4gjrmEMCiHgECYkkOSA5HJCmmRr/8tVj6h7Flep3txqVTc9A32N9dZ7ymhrK7fQU2vWad3HK+ProWxdNk9Yjb7sYxy+FS9t/gQBlulYUz92fH779ZZAPv63zvPgrvhj8M91wAh++42WQD9l14n/x0xD//S88s0SBAj17VHwA7OOfSvl25/ISq80K+/0OSZJCCAnGeWmXQ63rL8Zp/a+exKoZgX0cwVPNW0gpi8kmff+jCUQx7wJT98dft2yJ9raj33NY826DjY2hWfc+AcKy+5RBVIJo2h5jh3ChffUh6EEPn3TpCxWdbTZ022J+qlT1yKMwTQfwqddlxp6uPetu6PvJmMx7Xb9rGnNdngur1LR+R7R8d9I9av5VhtyWNj43ZmtUgh0fa6DiUK7dU3/JoNszA+MMEVP/KsdwaadrBzVjIv8dK+fKZP0pkHm0cXlctxchS0exgIrMFkKovYolkGbK/S2EijQGgEzkvk7tOJbKsrIt8rSWQPGPMpyOZ6T7ISGQZhK/87Z34d1PPIm45+8mRaeDkVKrwiCM7ykMAF0rVZaCqlwAbdkA+vJR5tRIxbmgWqfliy4jmw6abNn1kKt0ugKMqYm940k6PWAL7NMDWJbiZitwcXGBzUrtV7FZr3GxXgOM4Xq3xW6XQKUqUwO/ShFSGgrqviqqtz4J37TC0GZoNk3gt+neZ9J/6mhml6nqdt7+qevFYonFMvSwt/1AINDO0s/K6uu+DIfv+qFOJ3D68p9ADDjhBOaf9BxkDQ5g+SfvObM0tJnK0dn/ho0fKgihbLPr7Q58v8fFxRoPPnAPT11vkRwOym7jHEhTyDyaIMsyxHEMIQSYdrp26GdN5dewKM+ehaM5GkVHOZjRwEeOZs6KSbE5bDgKi/BPg1ZAQRdd8kljerVh2d+Im05yyiAq8R60qyQQCMwE9f4YOB5zzX9dkF9T1aP8Y/TWArXrqhkxNml/XrePtANP0zdIZEzUb+dY3BJpvHS7MO9+ZJ8af58yJ83kcVUfLzE4Pqf4bMhlnmNmvrFdt6loJVS6Z0ACGZBWvqzqWW9HjDFEYIjjMi015xx6G2LGZGlx5QsdGAN4pAKFIui54qi1b6iXthYrlSgijKVUexynaVp8FkZEsoBUQU4SedptXtxPWO5tez/JMgYpUqhJ7mgZBzByd6+O3lIFV/XAU3iYKLNkOoMuqDqWAjQYu5qrjfA8LE9Tupex9K3TrvtWyp8x7PZ77HY7RFGEVRxjFak00VebC1yuNxBSYJccsN8fDKOjXLLeNXk3xHlbN3JtuhynAWx2JPapA5/boVatLO8FhalhytIuFyGhA4GAE5y1X1+vhjmBGcing+Yf+sHIGr6Tjzx6VlmW4hzv3HO2UxbxIjpWCon71zvsdgkuLja4utxgt0uQHhJkhvNX/8tYdUW+GcnQlArPnKcYYmv1nURsswnLKIh53i98moNxWxeWz/iVjuBCl55V3pEF/bwwtR2gVoGSaGNwu22VBD3o4ZMuJq7qpbcXcx2Zb5VWd4j1+636j+X2hhjh/B1b910L74bYQGPnxpruZwaFUKev83+so3jMQonCjq5fq3bNQVjs9yy/KINqwxkkkkRUf5c7lFVWoKq9xRkDz1NS6x2LVcRxnpqaZQDUOXUlKrY7gBUDEAEs1g7k/ECpAACusiXn15GyTDstVBRtJSBKCHV2JtW9ZJ7eGlIi5UAWrRAhQyyzZVJA2yhycLNyY3FKAwTFB5uiTHUoy0hZNsqcu9x8iWq0rXCiwtxldkpffqpsTWn69HUjzlWa6FWMOIrUqq84ghAqRfT+kCBLVSQwgMGG8ykLHuqTi3MsnvCJqgO4aS1ifrRXeXWXNzVbhRahXAKBqaFk4xy+/aVe7Aksf/+PcPimFzd+v/6/vveM0pwG5fGo7jidkiy/bpneOU9OJ9UewU+7dwUhgP0hxeFwUKv6JcB5c6QuYB/FbON/k13YFMlr/qZeLir7zPE16scqEROVyaDpyphyexqC0oNO3zmcvJ3J5joh5eztQdgXmB5BD1q4rofr8rfhum7anc1axkW9JE40nnEeut57ppyTs9lRps1mC8wYGz3cJQNV2ua0dUmYZXXSPDCGl4d94aZuxaxYP2letitIp/6drByXYFJljDQj5GOuMk6C6TTUPD8nlyP/Odf2nTpJPZNMmjcEwAvHtvE2YdH+OIpeSgEBhid3B/zJex6n4wAGVGHWNxGnBOWHkbJsAF35qMrlAkuVXZ9BhHq9Lt3XtkHVGTylI7g+4aZXZXHOEccx1usYcRxDphmiKALjDNfbLbbbfX4Bc9eL0+RvK4+hZeX6y8iUmKvimib+miZrQzlOg27yoTwDgWmgaNv0dgJ/7ZeCf9qfO4NEI3j8fUi++lsbv16/4juAu1dnE8fHPvPcbVdKiVQKrCOV7CzLlD334N0r8CjCU9c3yLIMIncCN8GYPfVuWzRK08JD/V1TthZ9vO4ANq9V+Wz5znQET4Uv7dFNNY6F5g1zZcEBvBz+PCN+6AEEXZbGRZmH4IV+UsUZ2lRRTi7We3/fJZjCwVi/lkmfbISn2FtTZU+k9m44db0M1c5WHjr6uelabdkazWOcMWRCHJ3blTXIPKbnoPX8M6tlF5AQYEzZeuUi0wicqwjjCEba6uK6Wr5m/RhjSFMBJoSQAHBzs1vcKSGl2lRZzxqqvPJ0Oh1qD5cJZdkA2vL1kS04BY6hVKddqXCpsXRf28YcZdf2/Ax5rqaSrXH1fH59zjg4Y8oZvFohihg4U0ZDmmXYJ3skSabOZwwi38veHL7r6YLaVg/2ka3v90PPc5FTDcvQn89P26rKQCAwHMp2jfNO4CeeRPJV39z49foHvx24d2fSW962fnHu9lsd18t5BDPtGqAmR+I4xtXVBZgEtrsEhyzLV96rXylRq45fmyNYRWPiaIJ0THrFui7CMsFkLlosjgEABCDLOB71tayMw6faPT61V7dUaVmcUP/OKb10bFkZBeODbe66/Bpf9AD80cW
1zFMuyDgW28IuJ5EqEpixwpIojhfmg2mD6EPnka68by0at21O8VRb0+asG/Nb8/Pc7YPa++FUkb/5xSbP3TKmHTPjX5s81mMNGYCO/BYd92y7lnmMMVZcjOnPeUSxassZOONGBPDNFrkveHHKla8cGfo98OeE2kNmEmQbD3X5qEKx3Kin0BiTzuLcLCHfFClcpr4nYwwR51itVlhFyjHMIwYJht1ujyQ5IJMiTw3NjQk6/Xu74dpHhymc5mPOp8ZcKzsDU+J2GwsEqOFan+WyE1i+449w+MbxKaBdH2PnhFo7LiZMIHGxXuPycoM0E9jtk3JVP+eQNQestt1U5rY8nVuumo68LDLKGJMxYzKNmCkITUzb0Vau5ruFaYue02Z2Bdpq9BfOh/pgoDG/NxW+6BL0oAc1XajJsxQuloMKXdB2jLJ39FacrqL8XdpmA+qutXZbqLsO27L5neIobpRo4nlRavZ4G7YMN0tTOFV70uYUXpLiKbcnOFoG/cLEGANfvoyOoNzJB9kC54ZivVLoXNtgxn9UWaJel6g322q9SnSFlDhkGba7HZ68vsHNPsEhEZCZwGa1wgN37+DpDzyAq80akAIM5SImvoqP7tM1MWiTpWlF4RAdKT8T5phv+w9oLosh96hDvVwCgcDtxMV+afV1fx/sOR/TeV76kh+F/PXfPoNEA3jiyebv1qteY1TgGIrtuLDBeISbfYIn3vcUkt0Wd+9e4u6dKxVtkKkMLpzz3E4AtMWunawSyvFrpt212WpN6ZxtMjV9tl2/7wJBPSE6VXpginXqNszyX398qI/imfJAF8CPOgH80sMnXQL0cLleJAMkOCTcdv4CgJQCaq9XCcZkbv80/5ePPug77g55B2h7N1jq3cGl9xfTIuJM7aV7mrV0OkOfcpu8Wpe2/+bWq0wBvd1BiHJPxKUxO9IsT0NAQS4T6p09ZfmoykZVLhcIZdefeklRL7sl5esTJTHXPfUknz5mpv+IOLBarRBHMSLOi99kQmC7T5AkBwgpjpyYfZy+feVrOla/j+2+U4+n1MbnLswJAddkX45QToHAXFC3A/pCKRK4b9+e/uTPIvupV9uv8eHPxPqff92UYt0aKLbpuj0koCeVJC7WK9y5XGN/yLBPDkizDJxzMMaVPVe7VmETdthk5ueuSUEto/6vfqxpMVvlc9NxWR4fav9YU815wPJqTCOAF/UhdYQNvDE3fagXH3TQ+KLLUs6jQH+ol1c9Da4/cyLKqTvMwZlbgS0BF7a5NWC6eUpb9krb/dp+33TOFLb4FNeoZ8k55ztC5U46U06f31nqY2gk8DlpLFNzLlo7gLfbfZH6iBJKCZUKmopz2gbFl1yArlwAbdkA+vItSduzGMrNzpHTN/+3bnhRhIJs52pv1kkuziAzUbZ7Buj9USLOEUcR1usYcZ46R+/9u9vtsEszZFnWqkOXLE30mXQc+v2p57sIhfbtBv63hUDg3PjY/xy+42WQD72t87z4a74E/Pmf1HrOOcag5IXfDfkH77J+xz/9k7D6mr8zuwy+Qbldl3Ye8u2vGDhnKnKEMdy9vMA6jpGkGZKDcgQzxq3OQillMRlUn9SyTUAOcaCa0cNNE2a2Sb8j57Hld2Vk8zj8sQ2XmPSe517O14k0FlW4H4xW4Hy9wA8dgKCHa/d0gSaHnVPlJRmk4Th1liKYV0f2lnNyk1zeugUHt55j2mZjnMT6N2qLueEL72zXtdmRXbb6FAsD+mSvGXqd4T+2HOshSlf9UKPVR5N/z/XLiZh8e+XpYEwiAhChmgLSp5QegUDAPZhEsQeYibT85zIUBjkzImLO6E1r2uFMGV8VI1KPnUIgORxwfaNSRO+SAw5C7Qm8Xq/xgQ/cwQc+7S4u4ghMSthm3MaOY0NXFo4pq9swztZXWPqubxV6C/8CAd+w9Ss+9zWrf/qVYB//0Z3npd/34xC/+SYAw1KrTcqT9xudvwDAP+6j5pchcDb05IhuW5wzcJ7bU/mx+zc7vO/J+5AAnvbAHdzZrCEgkBnWPINhgzEGkad7tmX4qU+2NfUHdUxbVAhxJHtTH1J/dmwBlcqmPX7G+vZJfvVdp/YzNOwoH8aU4r3Zlxdo+PGs+KAD4MczAqAYDwK0sI31bqDTJS8/3zcNZdLdKetA21Y8z/yn/kP+n3agj7tmU9lPWSe2Ntn07nX2d7Ee9JWr/t0pbaBPeVApn0Zq6jOZl8j9my2koN1JSanWpigzm2ZBU+3oqcoF0JYNoC8fRW5NmdXDeGuHR12SeNlRk2+qAf6U+5rkK6sQxzHiKEIUATFXewFLAEmaYp8kSNMULF/iXndmj812MSYieCzkjZ0RUGvby+Jf/QYCS+N7H1MZF5IDku98BeTD3ZHAq6/5UvBPf96MkjWT/dSrkf7kzzZ+v/7ObwB79oeeUSK38aGNFzYZgCjiuLy4wDqOsNsfsDskYBxq4Xxmjy5pSg3dFCHcZk/pKOB6pK8ZHQKUcpjRv5XJ6FK56mfjsNo7bxrb00XmUWOZsnG+Tsr1GDPVy/lxvk5ygh60mFMPX8roHNTHZz0Gu1iGcwVZLIV20s6NLfrXkMJyrP65PVq5rI+6I7cpvHWc0k0LC/tGC/ehT9ua+31iqusvFtU8AiGlWw5gjZQSGeHJScovv0G2cVCWjTK3pdzm0JJq2VGVC+hnnJzz3lJKRFGEVRxhHceIIg4BgZjFEFLgertFlqlZDjPFixDNKzH76NdmiMwRMe0rlNv6/Phbr4HAuWjqQ6hOCs0hU/Jt399rT+DV1/5d8OfPuyfwEbs99l/1zcDNzv79n/lgbL7nheeVyXF8GDfrUTyMMcSM4e7dK/CY4/r+DTKZgbG4jPqV8sgZXDxNOn2f+V2Dc7juJLZNxGonb93pq89rskfrCGtWgqrebdTv6Qunq0KjLHyok3IxxaJiTIIP9aEJutDA7KfD+/2yNKW5dbEcfXMAq+dk6L7A81K3MxX90lUzZq+f6jUB0xYp6/Toar3uOaVtP2X7WuqdwxZJ3ef8udtfHye9UztsmAXnlOCEoNLp2aAsW2AcoU7HQ7XsqMoFLDvxWE/NB6iyyrIMhzTD/Zstnrq+xuGg9gQWmcCdi0s8eO8u7l5eYBVzQEpIIYrUMk2pWvrI0uZ4mMOI8xHKbX1amOW/QCBwCm1941J9yxIpvtZf/+VgH/8xnecdXvJ/QrzhjbPI0ET26tc1O38BxP/NC84ojfv4Yg+YzwTnHFJKHCDxvvv38cT7nsKdqzv4gLsPgDOmbDbjPPMpastg28dBq2UxU0Cb8tXTQtejgrvgjB05pXWkTN0R3YVb6S7bGa8GLfvJh/rwKBO0F/XhI7e5XqinfHUJs+z0eGj+5xI+tgGV5YROPdTtzGpaafM/cxQUAEpbsC2ds97eRNt0+nN1VB0WsTu2f2jrY4Zez7Z4sqns5qTpXk33Pdcz1fWezxhzMwJYI8COVq9SglInU4eybMDy8rWtfF5aNlfxsdzOoRHlcqMsm+askVZMQEpAL1EyJ9DUCwEgGRAxhjiKsI44ooiDMwYwDi
GlShG9T1SKjoaVffVIkN7itRglU+HbC4PGhbY+BFMdX+ssEFiCqfsKX55PapHA8l3vRvJPvxs4pPYTNmtsXvqtwJ3L2WXxAf/GyPrEGs9TJAMSEuv1Gnc3G0gO3NzskOlUzIw1vhtwi01nmxRqcubW7Vkz0rfJLrQ5co+cz8axagrDcfXqQp/V9W4wXIX6D3SowrL7A7tQFzYEADCACV1PanGFq/rUCXrQwhc9NH318U1vCrQt3HetvJuimt1FOT1N56NrTF0nbTZe9btx4ZenZi485bpLXLPdrjwtU+UU8knpaAroOlJKCCkhGc24YPOlilpHQ/mFXa+YolZmAO1yo4wP5baUBpTLzhXZluhLbE5cc0KOc47VaoU4isAZEHOOiDPsDwfskgRZplLySajIDCEyI82gRH3SqS1tYJtctuOn9r8U++4poNzem/GzLgKBOTD7Udsenyj2X7KnW23D136xk0O+J/BDPfYEntsJ/OR97F/4IuDx9zeeEn3+5yD+W39tPhk8ws0xcQok1qsVrq4uINMM2+0WGYty26nsP8z32TKl7XE/0DRR3PQ+bIs2KqKWkTuP9e/0b3tEHkspK+86Vbuy376GvvRzdjXc1M3VOqlHvjcvsXALV+ujji96AP7o0jW+BM6HbeGWi/iVFlqcLSLzHNj0mN4uZ7n5qO3BiS8PrYcsFlv2cToPnas8tVza3vnneEaG1G1dtib73hsHsAqKp/kQU38xpiofVbkA2rJRx6WyoyQp5XKjLBvQPlCeg6b7mJEdjDGs4hhxHCGOI0ghEK9WYIxhu91iv98D4JWsFzyKIUVWuUfjYD/gZXCqcvHFsK5Dvb3b8bMuAoEpaHuZszuAm6/ja783FYfveBnEm9/aed7qH3wR+Gd+6vQCXG+R/PMfhHz7O5vPedo9bF78jcDlZvr7e4ab4+E0CCEQ5Q7ei4sNrq42SBLlCJYAOOeFnVfvF+rRwJquRXi2SF+rszjvtxiqdVQ4jPVnNNuN9aM6IrhvP+dLX3ishpt6+VIf3A81APhTJ77oAfihyzkyfgX6UWaBo7UH7VD8cgADKhLYH33O6QTV19CZYoyj+XfHx6ocv1eXz0fjekUo57BVms7r1+8F2DI1jisX8/nu85wMiYo+ZYG5zfYH4IcDGChfVKQEJNEHmfJLcpBtHJRlo4wr5UZRSspl57ps5zQCbZN42niIoggx51jFMVjEcgeEhMgk9kmCQ5rlUcGwroTvkyK674rUMQbMbVx5TLnt68nK4JwKuERbGjXb9zamaO/nWVl9ixgQCcz/yn+N1Rf/jenu/SePYP+drwDe+3jraauv/GLwz/jz093XU8JzUC4SEUJFlFxdrLG5uMB+n2CfJJBCFM7Y45wtaExra04o6fug4VzzN+a5+cHinvXjtpm0SvSAOmB9F+rrDPbJ5vBFF/f1UE9ScATTwwc9fNBB07aAKHAe6o6hIYtKKTHEweUOotDDB32m0mFK295+LftCRp09R38+/T7md03XM3/HrOc1tfmucqqe3z+quY0hc69eRwCbSCmREV2ZSf1FmbJ8lGUD6MtHFUrlRkeSflAquzqUZQOWdQbXjYgmRzDnHMj/Xa9X4JyBCbXvnACw2+2RZinSrH1/sT7OYP3dOCNYTzgKIE9TrY91XcYHY9uETrv3q1wDt4+2Z6lrle7UqUnpPNd+0TcSmH/Cn8Xqy74Q+KCnn3S/7BffgPTHXgkkh/b7Pe85WH3dl510r9uAdv4B/o3lfak7fwFACoEo4ri6ukIcR9jvExySRL1j1KINTLocwebnvufoxfFF1JFRZ/Xz6nrZEEfn6X8FpLwdUWe+6OKHHrIxkt5VfKgXH3TQ+KCLTw4u12iKLHS5TroW57qFBBzfE9jGOfWYw1ncFCk/9fu43UFaLJnMP3fPZyqOncNaBzWlK63n2eU5lgHoI0f3glEvHcAACgcw5UkbqrJRlUtDVT6qcrkAlbKjIcUwqJRdHQpytUUl9JVvDgOqvtLNvI85GWeuE5NSIopirDmw2aiUlBICcRwhyVSK6CzLrClIbHo0rQizydJN83jLO5bn+2Jo11m+/ftZroHbQZ/VrUOdwF0OlDHyqN65fQFOoIUBkcAAwD/nM7D6gs8D7t0ddBvx2w8h/dc/A/nH7+4++d5dbF70DcADw+5xGwkOYHsaOQkOziQgBSLOcPfqClEU43q7xSFTW3aYUbkmbWmh65li6qnrmiaZ9RHtpI7y6CNzC5I23QCASUCy0qFs3rfQu8Ps8aWNBD0oERzAVOibVco1fNLDF11coj4nU19s70Kd2OycOi7o0Yx7TuA+Gd2W0GNqZ3BbtPzcc21d1z/+nhfP+vF42P2uNESftmdQyvYofW8dwDq0WleAlBKSWJ6Y5SeIu6EqI1W5ANqyUaZPJztbRGbX99aOPPQnfaAql8kQGc9V703ODQk1QRhFEdZxjIgBUcTBwCCkxD49IEkOELLbQTJAmkIm4PRnse2n1J6rU5m3/ftVVgF6NC0Uqb+QWZ0OPScIKD7zLoxbPpF82/dDPvz2XufKOEb05z4O/AWfhOhTPhHYrO0nvvOPkb7htyHe8EbIRx7tJ8h6hfW3fC3YRzyr3/m3GHMyE6D5HFNiFUe4c7kBpMTN/oDUcL7aSs48Vs/iYoscLiKLbdfSk5rGNczj9cUxXZliir/rn4s20fhzb9qJL3poGicJHdKTMT+sYpfKvIugCy1ccnD5jO2dKdQJDVRd+LG40QXZhwTkMH0qrzpZmcxt357qnvMd/5TF55PeWwkACQkpvHUAl+gGIhjNfPuUJ5qCbOOgLBt1zl12fZy/bSve6iw52FJtd1Tl0pwi3/kHc0C3Ws4YOAdWq00eaascM5v1Grt9gv0+QZpHm/RZwNBUDtKS1u+UCDr907ZruGC0DmGeZ8CvMgrQZIqV3lNG485J3ZlBSTavORyQfO+PQL7x4eG/vXMJdvcO8LR7wG4P+dR94IknR4mxeuE/AH/ux4767W2Cuk1HjSJiNstwcbHB1dUlDmmKZL+DkHaHadH31BbcCCGK6OGudMttaSdtfXDTQp6u9yAzKrhyLDiCncEfPfJ/lxVjFLbHxZfoZl/aF+C+LsHZuDz2jGk0/RS3DWXvtEeeuoYPz3vhALZkOdQIYJbBf453njF1cpIcEkXZCCH8dwADqsDyaXCcuvHy1FB+kaYsG0BbPsqyUWfqsjvlakMdwMByAy3lNkdZNsAdJ/BRKiHOwCWDlCod9Ga9Uo5hBkTRCgISu90e+31SC73VqaLtemhHsy6WU521Ngcwiv2Cm/HBaK0z3bPgX9kE5qKpzY1rQ8Ofefv5lJys9eeSCPrwBwAAIABJREFUkmy3hcO//H8gfv5Xz3/je3ew/oYvB/vIZ5//3o5B3ZajiJSAAEPEACkFOAMuNmtcXl1gvz9gt9urlfmW9w0zureI4DWubXMQ2Rb7VaIlRjiBq/rIUhBWPS4t57U6uD0g6EILtThiaSna6duLBgcwTVzXpx4JHOzd+eibUcN0OIa6WI5ybkzNUflSFa63K
e0A1osfm54rkaupn6bqwu7j82VHsTRmZDxl3tgSWNMlxxCsC/dR2h1M4nY4gAH9YiIhVNNZWhwrlF+sqcpGVS4NdfmoMmW5TV0DQ2VbMuUDNajKNrVcc9S5LR2NPsY5L/Zzi6MYcazSRDPGEPMIYEB6SJGkKdK03IPuWO/jSL22aOFp0qJ3O4pcN1zrDG9vfukfGI89Irf4q+lXtfNZJbLfh1RXY6A6Ht1msp97PdIffeXZ7see9SFYf8NXAE9/8Gz3dJXwvIxDzT4AkADLJxWlFIh4hIvNCqvVCmmaIjmkyGr78vadoK+neTaPmZ+b0j23ZWmoy3Gkn8VBfBwR3Nx+fBh3fNBB44Mu1J3AfXvStkh/Fwm60COkhZ6XMXOGoS6oILyqD9f1MB3AxbG67QrlTNXnVr7r6cg9csY2PMLaqSr1h55w43ptC+SPjlnuYxONWQ5Wfut7CmhpWZ2qXq1oPgAuvFxTlZFy6r4+ZUZNZgqMbWvnaKGnPgfnqGvqzyrFNn+6Y27e1KaV1aL59UWLY3izjrGOYoBJcB4himOkWYbdLsEhTY0rq30hGNhRvVQijlvuVTeS+9avlKJ2zfbIYGpt5hSaog7LyGv9jT86u8i0DtKuPqZtkn+C2zfcg2J/PJSueqI6JgaqyIffjuSH/w3w7vfOeh/+2X8Bq7/9N4H1atb7+EB4dk6hWHePTAIRUynYOOeAkIgihrtXVwBn2O72ECLL99cFcm9xNZJXT25ZnMNqibualpLoTivZ5tgdkv1ISJk7uJUQsvid/s9MY119B3B93DHxRRcf9Mhb7+K6TNFz+uQIDnrQ4tQtngLT2EeVcbwjs0fgXFTnpyjXwZB5Csp6tKEdm8VsYYP9WFrcdiplVZ9wMxYwttrAxnna6XwkQCVNTvl3OdtZ/s8mq6UXsNgT/ZzawrgYuw0OYOHgRFCQbRyUHUtAdxoQijJTYGibO2cLneJ5mLPeKT+vQgiSbb67zIbJPLWKOpWfcpS2O0t1v8I5xyqOsIpjMKn2NuFRhN0hxX63QyZVumc9yaCdvrYIkabymSMq+LZEBAPVetX4qKeLtC18MOlXX+39y3x17n9bansBpjwWBuyI1/97pK98FeR7Hpv0utFnfTqiz/9csA/+wEmv6zPh+ZkW076SuU22ihguLjaIVzHu379GmgrwKC5SEtoyttS7OmMZ3VGX3yeC2JYWun68Tae2Y7J2XEcF++QE9kUPwBdd5OJplIMDuErQgxbBAXw658gi52q9tKXrdQe1kM2XfYFdbUsAAFkuJ6TmmO+dKvpIXAY+8jnpst1t8kjpqQNYR0RJptbC2qDeGVGWj7JsAF35qMrlAo1OpzPLYWOqep1rAKPa7qjKpWmWb2g9TRsVXM120OxgPjYKGDhniOMYccQRMZUmGowhyzIcDgckh6xc0VY4mauTdF0OYPPeTZ+7dRSdhl3XalnXKBcwlcd80Is6fY3mPtCuLtLCzQb1cSbQTfZr/xHZz/865MNvG3+Re3fB/6tPweqvfFZI9zyQ8AzNS1G+UoJzhtUqxuXlJbJMYLfbFXMaAMAYL7KmoEhAV9pp6hzW6DAaEh3StvivzTnc1F6KuZlKRLOfC6B90ccXPfiZ1JizpzSjnnzAFz0Af3TxaTHOOZnTRjLnOVwbK6d8v14WncmkO6uKKzhbH9LMJ6Og0m/1jcJuDyAQpWnfa97meF626/5CCPcdwJJxI9XQyGsQjhyl/OIdZBsPdfkoUjynC8thY476nLIvotzeXJBNOVqnqw91rdP0bosYrctqjm1CCEQRR8Q41jFHFMWQUiCOY4Bz3Gy32O8TRPHKuEc/eZomDuvM6WijNoYPp2rMUbRLlqTvJPRxmfV/3twvb9flHwflsSRwOvKJJyF/7beQvuG3Id/+zu4fXF2Av+CTET3/eeDP/dj5BfSM8DzNS91O0mmhpRSAlLi6usLFxQZJssdut4MEhxn1W6+epmHLFgFpW6Q3dBKpSZ+mz/qYlBkYi2oRwYNu5QTu2xElruvCOrIKTcW5mnGICKaFDzpopsni5SfntIn0veoOx1AnSxP2BaYGA8B1emXn9RHzTOHI6nyZ8w5gYc2HPQzKDmCA7ks4Vbk0lOWjLBt1pJS3xgkMTDeYUW1zVOUC5nMAG3cAMP7aOo2exnTA1g1Um4NWp7RZr1aIOAeDRBRHYDxCmqbY7XbIBCq0jZNDVnuOnVxkbPprU8WsX190OhV7G5NHE8h9HcC2NOfu44se/aE8jgRmQEggTYFDevwd52BxBLmKzy+XR4RnahlMO40z4PLyAutVjJvtFlkmIaROH2379bF9Vh8N2jKnTDEe1hewNX2npK3/1jzq9jjmjz2hcFsfOUsE7ZI9pC9OYB900Piii0/ZtaZkCQdwvS5crBO/3rHLfYH90Mf9emGyHA9d16W0KuRsJjApB3A1jqn5nKOXBaDHL3vKQPhlN8g2HKpyaajLtyStTibjHIqcKlffhSh9zqlfi1KZ2R2SNGlytI69Vnmd4i/Lsb7XO75+PZqjnkJI36gYd/OUy+s4RsQ5OGdgEQfnHCITOBxSHNIU2kQwb2kTV8fqM0tqElOmekqjfi8Jx3dvOt19Q7Bse8fOfMDVCVLTmV9lmD4eVO9M+F8w1MeMQMBlwvM1DX22sLB+r34NSAFAIo447lxeAgB2SYL0kOUL+tmRzadtA9vlzUMcLLcDq7K02WN930+aon+PdWxua8EZTBO3dVHvJcB4+5FKzziHQ3spXNehz5ZEVIOMmgiRwDTsINuCftfqw7QlXHsOjim34CjrZfl2MgXO1ov2lTKfIoGl8mxPRt5u80hgMg7gNsrUryzf13f+e1GFqnxU5QJoywbQl48ygmjZLVGnQwY7ym2OqmxmNLBm6ujsuiHZ7/rVybo+EbpNL6pCCDAGRJxjtYoRxxE4oNJEM4YkOWC73apJQynBGK9cV6UvbJ/4szn9T1+116/MXDcIq5E16t9z6dQ38ruO3VGtndq+RuCeg9tZVlTHh0DAF8Iz1sxSY5SUAgxAHMe4d3WFDMD19bWREcnmpFVL/OoiqyGZWUeQpknmUyZt+6aGBpqnUV1vkj7ZNs7rUtjO49o0paYYnMA08U0Xn/Rpg7rt47ITWOO+A7iKej5ot5u++FAvPA+Y0Hau2+Shr1OqIdW8rVMOYJGvVj3X/ahCWT6qslGVy8QFGalx29JB96VrEF9avjboyza9w6rJAaxpv0//KOo+TlcVcaoigjnniDjDer0G5xxMCERxhFRI7Pc7HA6ZWk/PGCRnEJlQu9QZ126Ty1aO48r0djiANUukhu7/0lat76Y21vRdoC+3s+wojw+BgA/4/oy5Ne7oxXY6bRsgmcRmvcLV1SUOhxT77RapPM6oYr0aM//OFwIenWOPBj613Po6ghtyxjjfLt1qd+04r4sEGCQYd9sBrLHt8+0izrcrA190cTn18FiojzUu14lvDmBg/GIiiriuh5kOGnBfn8kjgSUAcJoOYFn8t1ylUe/8AboyUpULCLL5CtVIYGD5em1NMxfKbTDK
+aYjK84/RvVNvzfWyVaeX6bek1JiFUeI4wgRY2CcgXEOKRkOhwOSw6GI6JRSRxK3l09bevKxDmFbeu2m37tsFNrSfgN9dRr2XE05ARwYy+0td6rjQCDgK3M9c3OMIb6PSXr7BwlRBAJIlNkzLi8usF7H2O72SPYJGOcQotkeaFtk2McRXMo1U0SwrNo3xZ+1W7k6LvjUXp3XxUgbWVfFxdZlTnq7TtCDHr5GArs+lrhaJz4tBJdSqq3T3FcFgB91oiOBNW7rJCafBiLjANZ3FhWziy8hSgXqAwNl+ajKRlUuExdkpIQE/TKjIF9bNB5FaMu2xD2P94JpOm/MdTVdjtvLywtEnEOIFBHnuFivcX+7x36/zy/AISAHLVprcgD3kcl2DaB5T6Yh+lJHqawWJFi+BXCcyrF+7PzkKxrzlYiBZuZIOe8ClPv+QMBHTlk4Npbb1KeNoUiNzJkaKfNuMcsyRFFULLZ78O4VeBzh/U/eV79h7eOqrdhtDiTbArCpM99UjomqLVNEBDfc0tVxwpd277we2n5mHuiC4ASmig+6BAcwPVx2AtOYi5gaWSzOcx0fdACAyJs9gad1AhNwACttMut6OxqVRX1w6LP34xJQLjfKsgH05bNRxgsudP/aHlLC+I6Ci4FinfZJ02t+f+7+hWKZaUrRzP533pZm6+Ob6sQcF/qcy3T4LljjQ6xTDEJKcMYRryLEUYQoUnrHPMJBZtjvEhzSVJUHY5BCVK5RXMci73QRu+b1tQHYfDalsdN/jusmYOc2OoAp9/uBgK/M5QAOWSROQzl0AUBF6epylMXCL7XYbhVHuLpzBWQZ7t9skWUSjHOAqfchBgkOVmRM4tb0t8q2M7Gllp6yHs3+vljQZnh9VUY4ZV9LKRp/23V9am2Pmjyn4KwuhQPYv+gtT9Rxt23V8EGPvvMPVPHx3cJVx3y9LlzU4RgzSMPtttY34IQ6TKotEkw71s13EfV+ps3zU3VYxAFcPvTlS4YgXg/UBw3K8gXZxkFZNsrU00EL0HAAA3TrtK8jeCmoygXUZaMxkNnq82RjwRJFa0aYXmwuwBjAOc8nFiX2e50eWk/e2SNS+6SC7iNTf7r2yVuuHk+9N+VnJdAHGn3IuQjtNRCgwSnPonuTKf4hpdo4a7Ne4fJijUOWYbdLIITMXb+AkCpyWDv7WYPjqx5JODYryxDZj47lvt56BHBTO3VxLPHpufFDFxcnho8JkcD08EUPwB+noy+4nNHMT0cw4Pq+wHNkfVkEqXwApi5uOoB1nQBT7Al8dgewAPJVqwAdt8wwKA4gFGUyoSwfZdkA+vJRJewL7B/Uy23JtNAmTQ7VKTg2mtT1RW5krdcrxHEMyAwxj8BXMba7BEmSVKKksywb9BJpcwyf4gCu/97mnB5zfUpGJfXnJVCHTts5F6GNBgKBwGlohy7nHEIIcM5wdbnBxcUG19c77HcJeBRBMolMSnDJEfHyPcnqBGbt+wLbPjdRz/piywJj6lJ+UHa1tNymbexwaVyhZDOeivu60IwWHwP3QAeND/UB+KWHi7q4NC4MxVVnXVOduKaHDZUOemkppsHl+mCy36JGd5AnO4HP5gCWAIQnk1uUBxCqslGVC6Atm8YFGU2WTgcNGHtHEYV6nQ6JxDwnlMvNjKrQYi5Rdk1O4TnLTrJ8j7rcExxxjvVqBR5FAMoI4P1+X6SHNp2utnQzZkpKU482hpS3Xk3XN+00peegD5SflduNW+1oakK7DAQCgXmQAKRQqZyjiOHycgPOgd1ujyRJwXgEALmj+HghvmnmdEUSDpl0brJB27YEMZXSqaBLyfptX0M1BbSGqlxj8E8X922V4Aimh096UNIlvFvQq5PbTnAC04Llc5UMjLxt2EyegfEEsWd3AOvCFQAw8/6I54T6IENZviDbOCjLBtCJuHXplY1ynTZN1FCAcrkBetJJ/U3FATzmnPr5GuvvWP7sSRTWrhQCUcSxWq0Qc6b2oItiSAD7vdonuG7BDFl4ME1UsKxMQDa9PFFp+2Oh/szcLtxuS2MJbTAQCASmp2oLqX+FZOBM2WGr1Qp3LjeQUuJmu0MqyoWKx6aNZQEh+qeBPiVzSnWMKO3oinRSQrbYjTZcGXtctzM1/uixtASnE9JB08QXXWyLiJbElb5+LoIDmAC1KCjX00Gb+KCH3he4+OyYTkWwER9voxQO4OubLbJMnFgIDABD5oz75XQoDzT16CpqUJRJQ1U2qnIBpWx6Z1DJAF5fxO1o3v05oVynwOmO4DnrnHLZFc+DdM+4GEM9greMhC6jolerGOvVClJKxJxhFce43u6w2x+U+cCZsh5yW0T/jnNea0f6hVPMokvpTG7/3lUoPzf+4XZbGYqtvw/tLRAIBJZFConLyw3uXF1ge32D3W4HROvcmSohpbK7MkhEYJBSRQjb0jcXf5vHjEnPpuwp9YV2UkoIIQybqzmy137seMGvkACrHC0Foz4WuW5bmvihS/P7q2vzGcERTBPXdanvrWkeOxfU+/Vz43r2Ml8htlZiFL60Ja2H6adwW7dhaaEndQAL5GkvXS6/EVAdeEy5KIa5Uy03IMg2FjPt8i3sCkZDuU6BbvmW7Feoll0p1+14Ctr2Ii6d4WqPujiKEEUcnDHwKALnHFmWYZ8kSNMMnDGIwqHMkUmJ6KgYOeZwAFd16DfBQ2lc7QvV58Y/3GsbpxLaViAQCNBDSgnOgMvLC6xXK+z2Cba7HSIeAwzIsgzgTO2ZxhgYA4SQ4Lwh0reShU4vAKympuuzbUfTVh+2eYyqPvaMTy7vD+yiPdmEH7rQmz8bQ2WxhgcEPWixROQp9b58aUI0MC10VfhQJb60q8i0YB3UqewD5aBEyxUHsBiQArqytrKI/PMo0flAqA9ClOWjKhtVuQDisoG2fNShWnZ95Fpq8KRaZooyChZw08Doi03H+qpg8+8o4oh47gzmHFEUIcsyJEmCLMsgwSCkiuXghrP3XGVYvQ3rNG9crVvaz4/ruNkmxhDaUcA1qGZICgSmpmKHCYHVKsbVxQbgHNfXN8iEAOdRxV7V55d/m1fMI78sWW7qE1F9nMCmjF3Hjz7n/6s/yX2ebarPv6v2ZB1f9AD8mOIMjmCauKyH7kPNdNBT69O1qMfl8psaszzMbBuaUFbLElJC08GMAKaWzn44+TxpjyopHMDb3R5ZJnobwgI42oMlQPdFwozAAmg+sNTLjiKUZQPo7AvsEtTrdIh8t3s1aKm7C/3vnJjpoc0XE1UeqkxWcYzNZgMhBKIoQhzHuLm5wXa3x2q1ghClA1inhJ4bs57KumuuP5frldaz4xru1vtUhPYTWIpT+93QdgO3BcaYivJFPtkllb314IP3sN8fcP/+deG95ZwXdld9UV8dXhsDmdoRrPE3dtvKvnVAny0FjiKFpQ5KaP6NDWp9gcs2pYkvegD+OIF9qZOgBz3qKf1PgVqf7BL1rbk0PrU1N7FnVXEVl9tTPQW0y7oUsO7siKUDeLtHJrodwGa0r6wb5fqiI2T1CYqDFUWZ6lCVkapcAG3ZgBANfAp
Ll1vbisqhsp1zQF263EpKJ6e5Fy7gtrHUl6ao3/oewYIBkLLYumIVx4jjCFEUAVIijiIcDgl2h7TIUjJX+dVXyNbrSx073q/Ohqt1TOf5cQddZK7W+VhCWwnMyTmep9CGA7cFveCOsRjI9/wtfKRS7Q98sV7jZpfgcDgoJ+oAG2dMJLAtwrfPnqvF+VIWHjlbumjjqkfnNN2DGr7YFb7oAbjvBA5RwDTxRQ9gOmcKxT7ZRboWcVHHt/kzpUaIBF6apj2AXdOn8nwMcQDvdnsc0sy6KlNKCTEksXQAAN1Bi6pcQJBtLNRlk+bfnAFS7eAZaIdyvQKnyTf34HqeshuuA/U6BY6dt3PVVXNaIrVX8Ga9hpQScZ4m+pDvE5xlApKhcBpXV7nOszewTXYta/Uz8vtzy3E3cKGNLod79TkVoV0EunCpvwvtOXDbMRcoAkAURXjgagPGOO5vt8W8kJRM2VzqR2BGdK3tmT8+JsEZwxDbqOv7pufXli5aCuTpW4qrKwe4IZ8+3nX9pXCpb23DLz1otZExMMa8sGr7LBxxBdfkNbEt+Nafh/w+MA8uO7k0PjmDzXTQLvZVdVyXH1AWKoMP0cCiccqqkgI6tTiAtfNGBgfwYCgPYlRloyqXhrJ81GUrk70qXO9WzwHlOgWmke8c0Zzz4bcDGDifMWezPRhj4GBYr9eIYo40TbFer8E5x36/R5IkEFKC8wiccySHFBE3o3Xne0moO4Db7uOqEelCWz0/btblqYS2EOjCtX4utOlAoIoQApwBcRzjzp0rAAz3799HlmWI4hXSNM33Sas6v7ptH5XlhdXSHrZFBJ/iMLBFAwNs8B7BlPoI1/rXJnzRA3A/EljDfVEE/rQv3/Toow+l/tZnXE51u8T81Nww5o9DG/BDDw5WjIvuOuZ7OIBvtjukaQZo41yapr2O4GO4rZNfY6E8mFHel9KFcqMMRRmPU3IFhkCxToHp5JrLYTdvuY2Xsz5BRan/BezlVk+/N1V6J9skYP3+nHPEkYoE5hxgUoJFEdJUYLdPICDBGAdH/xcE2yTh2Bcj8yc+pYam2u8sg1t1dwqh3m8PrvVJpxLadiCgqNty+jPPbbDNZo2LzQZpdsB2uy/SQmcAIsv1WqOBjSBbHYzbL3p4mC72YwLKAWzalv2jfan0GT711b7o4okawQlMFFd1Md/t6/9S6U9vK0Oc8tTw0QGsEPniOj9wtW5Mm5hLt58Vhd6jrNrnVhzAB9MBbLuElAiJW8dBdbCjKhdAWzaAtnxUZTPTQQeGQbVONXPIN9WAO41s0w/+PtTpHHVkS1Gko4I5Y1itYkSRskWiKAaPIuz3OxzyfYK7DDZbikDb9henpkpSzuTm71yDenudDvfqZgpuT/36h4v9yRKENh4ItMMYQ5ZlajJSSDAOXF1dYhOvcb29QXJIITF8odvxQr/jkXbK9JTVBX4ZdBCDzL9T6awByYC6JOYCeZttuDS+9Pe+6AFP9nJsWpThIr7oAfijiy96+ILr6aApBrCNR21J4b7DscRlHWS+1QmH2xHzBbV9gSsO4CTLOhWU0vECWAgqLw1NUJaPqmxU5dJQli84gsdjpselxFztbaoo0wkkmeAaVYOV8jOqqWeKAOZ9aai3bc45hCgNY1OeOI4QxzEYBBhjWK3XKiJ4t299Rvo6gPU9zX+7qNcrYyE1tEuYqrlaP2PxuV5d57a1xbkIbTwQqGKzSRlj5bY9DGBCIGIMd6/uIIoZ3v/UUxCSD7Zt9D304rgmB7CUcpJomPJ5l9B30++fDLZ30W5nL4U+xJfxwCU92t+76b2TjyVEAtPEB1180ME3XHY4+uUABtQ4ov7yQScfdAAALuF+dLbVASwkrvd7HIToPcUt9VtBeURf8nQhPYXCS0MXlGUMso2DqmzBCTweqnUKnEe2KVPEGVcdJ8yEaEcnQM9w0gsP2ibm5kgR3ec+UcQRRREYAyIGRPEaUkokyQGHQwowhgxCGXFgyLgy6PS1xpT7Kecx1pz62iUo90P9cavMp8SP+nMD155tHwntPRAYR2mvSFyu17jYrJEcUtzs9pDakZtvHdZngaAtGhgoR2NWi8o9pf9sTw1dRRjpoc1ZrSHO4HMu0K0vXHR1nHFVbhs+qGJGArvcrgDf2pYfuviih+uUi7H8iAQG3JT/GNkaPOASPugAqDGRw59I4NEOYEA/cCqlTvD/9ofyJESQbRyUZdNQlTE4gsdDuU7PwbSO4Hn2IB4C1frU2KLPT4mWHXrvputLqQwazpUjmDMGzhnieAUA2O33yNIUMrdVBFPp/2SeLlpHGQ+ZTGv7rt6ObPse+RQZTL3dtuNWWU+F23W2HK49mwFFaO+BwGlomyjLUqxXK1xexFjHazx1rTLIScYRQdljNudRnwVvrBZJ2SeVtI2mDDt9HLkuRgX7Mi75o8fSEpyO6QR2HV/0APzRxRc9fMLVVLf18ddFHY5ROVicjzw1cL1eTCewuwujhMrqM94BbKbTCQ7goSz9stBGkG04VOUyoSpjcACPh2qdAud1Ag913Jay1VLeEUgnQ7lOAXv6ZM2cEa1NEbNmemhdh1IKxFGE1SouUg1uNhvs9gfc3NwAuQFnm6Qcs5L02CHdrx1x3v69a8Yl9bZrx60yHkNT+w7Yce25CzQT2nsgcDpSSDDOIKQEjzhklkEwYMMj3Lu6BBjw1PUWSZohiiL1G9ntzLUdr5tFXU7gurPXjGoa4gQGAAgBaYyVuSXX6/dL9jU+jVm+6OKDGj45gQE/dPFBB41PuriKOV/hugOYyjzeNCgHsKt1YsMHPXQ6aG1nutne5GkRwMVlpLpYEQ0c6AX1iQmq8lGVS0NZPlKyyTy1FlAkESAln0NQLrclZOsaiE2RqA7ertdpUxTs1HJUHb9Vg4wzBh5xxFEEzlVUMOMcWSaRJAnSNAXnUeV3p6TUGxoVbKZVbFtRSK1ttkG53d42+5R2XZwHl56dwPSEZyAQmIaKnQSGjDFImWHFODbrGBcXF0gOB+x2ewgBsJYFbq3O4PzdkPH23zR9NrO59Fn4VFn0JyVEwzl1Z7D+XdM7xLn7Hp/GOl908UGN4ASmSdAjMBdzztvMjV/poEsnMOCDPn7oYG4hp3FNr0kcwICZDrqcXGdgt22+bRTUJyjMlxxKDZxyuVGWDSAin6x2DzqRAAnZHIRyufV1GM7dv6jJq+b763Nsn5egHtUA0DIytNMVaJ/8shlJUxnpldWr+rPhBBYMiMAQcYY45tApn1erNdI0w3a7g57aq7SNfHVKKVs1zUlXyqEmneyRzJVPrZNGlOq/jaH90XnsCzfKbgqmGg/syfKP73WOvjsQGEIfp09oV4FAH9Qcj3b+ArmJxBlSKcDBwaXE0+5eAhHHzc0OaZoh32yj1a5WfxfLgQsnMFjpci1PP46GsX22Pfv1xX1WLU2bW+j7C0jG8/dT6896O5jnxqf+zF1dyrZcHHFVlRwtvrt1cowvugQ9AnPgitPRtnBfQ132fuglaawza5wrOF8vEtDrE111Ak/mANaoCWH9chAcwH2h7L
ax.bar_label(ax.containers[0], fmt="%.1f", fontsize=12)
plt.show()
GbtolGcwqKSdo9w2umFfpMLUvcsUQzCQiMfQ3t6OfC6rewPXIp2lQ0P7CC/tem9pKPexEZSI2kyNqP+IiIigijlKCWMMXJYhxSQkW+JItMSxt7cfWbkAMMlitGUAJId23llHKdZL7B53UUhof5Srg1VrLLafLnm9aSZXixxjv8PxGcLyDxhs9wmmby6h3K+ObxkQYxKYxCDFGCTGwNTFCkEgLPU8NOlAEx0gmvJW+jTbMF8rwpAGIDzpAMKRFsMAnC+Ac28DMLXJaquSQHPilfKkl917T4FG3lHON+pQzjur5yidb1UAgI9wlc0yJtIuU/03/fdm7yGrvdd7tXJz66CTAbjZMsEyCDcbWml8pxqGXJq/Ii35uMkQbIZaPlLBa/W+Pc+cws01Ol9r8U5zGylJMdV7XICBI5VMoaUlBs4FCoUCJEnSJ6/Uu+1b4/mS0flvrzanWEdUoviVv7LaXGqU+7OIZhKO9jFq5yMiIsKPaREwkxRtgctIJeJIJhLI5HLoGciASRKEuj8wY6qxTbi3k9ZQ0Nox03n1gKc3sL5pcHUpDAt+5ziK7vMRjtm/V7BQVUrrfU66qoAxv6tf56CPllqtrM1zmh2M7XIzJvToPDGJQWIS/HThFMbGzX5/LQh6GrR6UOnYqKx31fHZYcH8XXr1EUEkTOlo/nxn9QRa9lIGYA2yhhtdMVC8gSlCfbKLqjcw9XyjDPW8oyqfqGCA1Cio5pmGYdBkZPoHDfL9F7HFVRp2IzU1DNlo7QlsRCihV+eo4mTYBYonayhQy+/V7X4h5zF48GAwxpDNZtVrAc6VPc0AQJZl/fdq3uflceP1nEpTruUepTKNaDTBaA+jdjsiIiKiPCTG0JpMQjBg1969YC1x8IKseAtzjpZYvGQoaKdjFkOwTQcxX6toRUzddzZCo9rtOPzqbG6Lh50dTxyeawrfbI706GWQ9hbIaRzhvLiUSQIxJkFiDJJk9Uz3O55rpHE4LDpKGNJRb6/TaMRUGVT3ai6XMHwjgNGWhiE9QU2DbwMwQHOyJggGYA2K+Qe4GICddbSmQDXfqEM53+yRBag1oJEhuHy8BlbNhroB2O04BTmdjNSUoGgE1j3QEe1/6hcvAzDFulfrb7T4OQKCC8Tiyv7AEBy5vLJnsOoWofRRVby+OOyitoDH3/2VDqojb+AIKjoChT42IiIiIlQIQAiOdDqJlngMe3t6keO2PX0FEHNYwFZqywo3I3C0L3BpajUe8aO3uV/jvKjT0QisLQA1HbN4BbsUr/1Z5nGEfk4U1z3NU1pDUu+VJAmxmHN9avYcVljqeGjSgdqkJRoZ1Y6wGICBEH0nUTqaSlkGYA3KEzZCCMj6yrHmyuIE1bwzxKKXaXbFrdnKVtCgWucAurKZBxgUoSwbADWcKaC1J1S+V/vCA4CGbKW8Hymgtb0U219r/tEwBGv5xGF8D9TyjTLBWXigUGtDsLYEiQlACBmt6TRaWlpQKBQAfX9gDlFDD2S3kIuu9zrcVy6UF1tFVEvt27uoDY2IiIigjRACEhi44EgmW5BKJdDXn8VAJgsWk8A519tyiTF9X2BL+E6PLSy0X+3GFvPvml5Rb6+8IFGvuQ2nZ5YTxafolDD/6v5szqx/u3sdu8ulHRMCEMK+oECTXR3D2byDjWdo85TNGx+HoY6HIQ1AbdqcaFxUW8LUD4QlHUB40hK0dFRkAAZTS0R1AAAgAElEQVToTMqZO3vd60adpOZEy4JK3rlB2RhMPe+oQjnfKMnmJAkl+ZygKp82oALodIxBMQBrxyjIphEcgxwNA7CGEAKCqBcrdajXOaA+hmBtPzJZlhGPx5HP5xUvBHB0dHSAgSGfyyoTY0yqKH/M7V+5njP2U25hGP0JYjwk8pYPG8xZqWI0+t2IiIiIiNqj6S+SFAPAwTnH4EFtKHCBPXt6EEu0IJ/P61tYxFwWC7t7A1vHb0WT+/bnOD5j36QRelZ1HsIOBmFYdVa7wdfiJCK5j7fKkkswLSaOixwAkxR54kxSdemSj68rYanfYUhHuQbHaPRTfyIjMB28FnsFlSClo2IDMOA8OUch8eaw0KXChDQLyhOaAN3JdIB+3lHFr+Lb6G+4meXp583U6xtl+TQjsH2/h2YZN70HnDQMrtT7Vfvv2t/NltGQSQtlC9PfrOi6Rskr1DBmlvwDjTyz4/R9NFtGbUEfZeqVb+Z2U0CAQdn3N9nSgnQqiZgUQzafVWUAhOqFwFC6jjt9B6VCLzo8RQ1JbYTqszzDTyJNn61Zd4+oHu5wrN4aPRPMWvAuBuCmz5RGRERERNQFQ2/RfSgBLpBMp9ASl9DT0weZc3DGIMCUcNBMWdBm7xmc9RLVNMecr3P6Wzu2r/c8jYq64mdhs7uh1vKXfowxyXJeCF6kSli9nFXPXpdC91oUbhiCzWqqVHyvSeXRwkVLkgRJ9wYWqhczB0PMU45ajRvCgFs6KI6dvfBrdIzGPY0lDGGhg/Qd2LF/x0FOi50gpKUqAzBQPFlNLdFa6EWK3sDBmNS0Tp5TgXreUYZy3jVKtkreEuVbZZg9gQE6fQTlPANoy0fZKFcsF60+TAgBDnqDWLfypCBjEDyBgfoYgp0mNQUvAAyIx+NIJ5OQZa6/2xxu3OwdUY4c5Q3K3HV/s3dOOTRqgjKiOpym0v1MrwvlwoiIiIiIfQCzLpJoiSOVSmJgYAADubxyHoqZTFINwMqPP29gJxXFS4epVC8JC81YaOdXdy+1SNup1Eo92xwRsvTzvZ4DU6Z5y8EYg8Q0g7C68ECSAFPoc/M9dqNzrccNQSboaXGKPhCNb5pPmPqBoH8jGlE6GkfVBmAzVCfntM5fm7SmVjBU801DEJ2toZ5vlKGad5QNwADdfAOoy0Z3kRDlfANoymcJt0WwXK15RssADAAywTLVsJcppXIFzPuL06SeRmCLUZfJ4EJAFICO9lYkEglkBvohSRIEGDjnFX+b5YaEVs57yF3W2xUiI3D1OOa7U9lW+oIKQysRHVJERERERNQJoVrQGABJktDWmka+kEdf3wA4GESMAdzQWWJOC4wcQ0YWz+v5+Vti++7WLM3Wr8rJdzdPYgVWdLzIgxfQ54D9ymAe4xbpvkIZU9q9h7XxklsUL0mSEJckMGZ4BtvfxyTUTEGiNnarlDCkw24E3jdbHXqEKSJEGL4TIEpHo6irAZiasZUL7hgSjQKUlVCqBmCAdr5Rh3Le1VK2WqeScr4BdOXTPIEr8UirN9QjWVAztJpl0QxNFORywvo90NrSgKohuNT30Myypux9rlFPb2pJktTJOwGmTkIJIRCXgNbWVgAC+XyhaDKqUsoN0eRmCK7ICAy6/VkzccrLpnyTPgzAhIcPERERERENQuuidL0SQDqVQIxJ6BnIIFPIg5lCQEsAJOasszsZgu1GtVIL2cI0+V8JjdgLuBS18Qyu7NmaCdzsEe10hy+vYVFcT5X5Dus4yjD0CkiSpC9EkBhTPd5rWyZUx+XlEpZ0AOFKSxgIUz8QproVhrRQTkNNDcCAtaOkNCmsySUTXndDeaLLEI1
GedqhnHfUoZp3tZLLUaGHMnfJhHeN9mrDqOYbQFs2qN5pVPoGM1TzjaJc5aymbjaGbLQMwFooaGrY9Tgzzh4YjSXIRmCg8rzjam8WUydJtTeYDfbpZAKpRAKyLEOWZeU4hLNltgRmrwb/aXCegHUKg+ZLBpMc1Mu8FpDdl8qnJbdCp+CIiIiICGLUujuyj6kF50glWtDSEkdfJodsLgcOQ0+QBACJWcbxTt7BxYsUvc4ZuoQkFe85vK9AwQBsx08459LPqOwdQogSc8TauMO7xhj6uGZYKl70bpGBMTAmEJMkxFR9u5bfHcW5lkoJS1rCko6wEBmB6RGlo77U3AAMOIfOaIYx2B7+w+0cNSjLBsCkAGnLOdWpyCbXcc+QLRGeUK5z5crWyJRQzjeAtmdrcZheGlBdxKTRaG9gu+HF12pooljDhjW/XJnQVp0L3RuYYn2z/24PSWw+1gy5grqgpBqZ7YZ4c7vAOUd7WyvSiQQy/X2AJAGa53C5/amLEdivh7HZK4eh+nrCCep5RVLUMsxyMynTfTfy9o2IiIioBfbFim7+ieVh95KlgBCK8as93YpMbgADuQJysuIhCSEDTJFYMhnSdAOxaadgp2glQnBX/cUM2YVXdUSJJEMff0bfUuNS74X8buMYLoS+8LIaGd08hZ3u0+prXJL0/YOhhk73NE+XGItR0ZerJUpHRL0ISz8QlroVlnQA1rkaCumqiwEYKO5UgcYVJIcoXgEWIAMwQFu+YgOweqL59Zl0vlGHct6VI1tkADYIjnwEGg8TlPOt0bKV9e0RzjeAngFYQ/MEpqIYmnHS5QAaYaHdZKNEPY3ATs/QvWU5R1ySkG5NgYEhn89XXOXNE0uVlbvzHn2ViKN5BNeaakqB2jdbNZbsLeHt4u+yiIiIiIg649wV0TP6mtH685gkIZlogRACvf0D4MIUUULzM7BNokqMwe7DazUEw+Oc7T6X42ElKAZgoDaewcp1gNukpdMzhBB6HdTHG9XIIEz32+dS7e8VApKk7B0sqZdJMDyEy118G6Z6HZa0hCUdYSIs3sBhqlthSQuldNTdAOx0rB4ZoL+Poax9foM8cdh8NEW8yWI4QD/vaEI53yyeaE2Uw4mg5BtFlIGO8julzjEI+Ub52VTzz7oavLn1zUlX8fK4bSZeOh3gbBxsJGbvVmqGdD/fQq0MwYwpIfY1OOdIpZSw0JxzcC6rx6vLo/IMwYZ+bh9al7viWsvJatoXx5DWFT8t4Lhmo21ytPQlERERERGe+Fv0Skh9aRyqQTLZ0oLWdAK79vQiy7keOUSL9FLkvQtt0t5q3LUuXIPlnPlfM7WIUhIkKIaB9qKWC5LNxuBysoFruwdrBnQXB2Nz/fMcP6kWZu2Z0I28TB/XCKa8QtKNwMpzJUlS32E8s1T9DUv9jtIRUS/C1A+EIR1hSIMGlbTUzQBsp15hK80rsioNP0Z1ohqgLZuGILrpVxDyjhpByDOqq1ap5x1l+TSDBWO09milnGdAsbGw2r61lumlnHdW2ZpX54JqAHYrWy/vikagT5YQMwADjTMC258lhFAmlYRAe3srYrEY8vmceq427yh1zHre9rfP++w4im6sJCp6foQLPgdOlY6vIiIiIiI06Oh0VDDra0o4aAbBZQwa1I6+gTz6MgMA3EI8GyGh7fsCm6/18gS2/x2WMKB+CJoB2EwtvIKtUYTK1EFNz/feO9hdFsfjwuoVLBTXd+g77Qllxlswo+5LkqR/A36qb5janzCkJQxpCBth6QfCUrfCkg6ARloCawC2d5oCplAxNXgeJSjLpiGIzg4FIe+aRanvkWreUTUAa1DNN4CubMYgjEbHaIdavpVaWVzNM2tNs/POyRjYDAOwbHqnVx1vdn65UcoAbM/nZngEhyEkNODdJ/vKU1PER/NEKecciXgM6XQKACDLsv5Mr32UnXQFsyz+vYGtzzFfZX6WPZ2UyzPwOOru2gQk976s6DklromIiIgIAQSHKaFBCAGwGCQm0JpoQQEMfX39KAhtobDzQsMYnLeoEKrBzK6TuuotQuielWEnyAZgO+UYfLXfrXqm5Ur9mGUxpQv6PsEmr+D/z967tseqK9maI0TaTnve1qqq0/X//1w/T5/uU3uvebWdoOgPQiCEIIG8MMB6957LeSEhkIRuQxGaMic9LgwLoDLYtdKgn49660PnGezKrzHdhRFxP55xriXFR/Fq3sM97Ik9eQED27iXj/Csh4vW1uRuAnBI6+01/ebj8K+3cjplnmhitg3InsB7hTH9Qs9/RhjTLITZPtc+8A1QwkVMHlb7pth1zzIwJB4ypF83Hborr6/JnK0pgGExkyHNPGGo4ZihTu697B+zjYE4f4fS5dL0ioVU/+/l+Iinh0eUVQWrtiMCjw3Czg3Qzg1sOl+JQuoFGCkxeOj6malEz4ACQHH2V6TrOTOZzIdiaiW0rE0g6kplRvh0fIJC8fP3K05VBYiBBmKwMQZVVTWirQF64aC7wi8634Wk3u+9mLAvql/K1L7i2HFjpxiK1tRzUFKFlSmLkUeuZfu/S/WTbSInCzE4FMaJw+0Zus+FV483DNPY+BL2ch97YU9CcOo+GCOmnWNr9o6x1r2sIgDPncCJj7fATScnmCeYmG0DDKV9jDZtCdb0ixeFsMGabsA2bGujea7f0KcGdACHbSFTF1etkf9bKHOOy1f+Xyo9MgvmIalFEUPce8XjHNvWYk7aXUL8e19PWK3w7fPnNiy01gssa2/h4uEATBgXLPUADvHhtvSM9w17nlIzUdnNAnAmk9kLZN2mzEwEQFEUeHl5ws+ff/D69gYUBaxaQASiQCECC9c/KKTdD3jc4xfJ7+Lj9hIKdIi9CsCeuZ7B6e+B7kzTsJCbFIPrw6vEXMIUD2MfFjpcODpVAPY3oFZhjMIURRsy2pjkiFdVnckbKvts4+NL2NO97IG9tAF7KVd7uQ/PGvezigAMnPd+UARhNIJP79EYbWGCidlGJZ49Yk43ZtjTjXUAw5xuzLYBrYDDsC9wPKBjFYCBc6uZ183zta8/RmvbPC9gxe0WoAwtPAA4yl486TG0wj0OOXYv24e8qJmYY9sl6dYTgf31qxKPDwWej88QBU5V6Y4VQVlVKBLXTIVnHptAHbapWz5Sg+yhFcuZCYxE5TmbgutXL5lM5kPTr6UY+j2Z+6KqMPUCwufnJ5xOJ3z//erEX9TjssoCpl1k6EuJiOn16Nsy1N+2JPXav99ryWOdP7kV1/cM1lFP4d5voc1ihaXX79vgiKfxB+fYVSECGDEwhQufbsREC3W3WSr21Ebs6V62zt49gbfKXu7l3vexmgDsCScNY4++tSMKs08yMdvnbLtdOM1LYU47ZpjTjXkvG+Z0Azjt6wpyfEwN4boGQx6kDJxbwbw2c9oufy8W98l/tsUH8er1JWLmvdONlbn2XSoGKwQCDUJlWzw/PePp6QFleUJZVjgUD7BajYq8sfA/N+R3/PXYIDuLwSMkk2FEAF6/+shkMpvG95XGIeiqZLaMcQKvL0bPz0eICH78+IkS3T
FY2C8J+yQFhvolwyJw/F4GjtkDzPMnt2aOh/CwoNp5V//tL5QMy2UVRMSZOkY56wFsBY2c30RR6+av6/8765r7qQ82RmBMATGCg5jgO9lUPb6nZ3RP97J1sgjMxxbvI1V/f0gBOPkaHBMU7JNLDPYNdUScbet77w3BkHZbgznN2FexsqcdI6RmAeBNMw+r9+NQqCwmpojATfoiGO7f+D5YxfNUnk6x8V4icBaAz59DFVCtoGrx9fMnPBwe8frnDTDd/ByabPXfp+xqPxN0YvrUXgixLUOD7KF7Zs/buzBzwMQwvspkMluGqx+S2TciUi9YEzwfDB6fn/Dv77/wXlU94TcWdccE4O77kdeJz/bCRxaAgXki8PTjgrIT/NSHaa6QXtA7NC8eO0olr23T5wmvW5+ssTBeyNvYYxVigMIIHopisH/PyFbsnMKe7mUP7CUcNLDtsjW28HyLfDgB2OPEmzoshjduNWu6sE8uMdu3NKzmPWCdTN8C7GUuFIPZ8pY97Vjp2saTp2vXI+cGkKn3TGzJNif2rr+oKSXus9RzS8XWe0wuxOnG2PavKgSruolAsTgcDng+HiHWoipdeEVrvQjc3Wvc/229idv8jNN42Ls38Rm6k17nBnvMdclyRrzsFMPfXX54JpPZPXx9iEzmHKrAoVC8HJ/x688rXt8rWFFI3d+wqjCm21c3xgDauiX0F7S1x57rs+xJBPB8dBE4xVnBdfT7ccFW1Ts7pcdMYdmcY4eqwu8Z3P3ef6atl/DMYixwz1FhgEJM/YzFizpj7+jhOYp7tDl7aNf2cA97I9wWwDTP8poWLWNPZWsv95Ka97j6NVgEYKBtECpd1jDdmi1MLrHaqKQzT3GnaS+Vxz1gLWtA9ga+hG3Yxvecrplu1wgdtRbM5Q1wolYo/LKkHWu6xW3q3PJ2ayF4aHU9G0tF9EswxjR5VtkSRgQvx2ccHg6wVQW11om/Ynr2hYJvvDBhKNRRv2z0Q81lb+CAeA3UmX71FicjMpnMfEi6JZnMXbBqUUgBqOLzpyPeTif8/vUGa5xnpREDVBZSdEVgAWDQX5zWFXyD4z+QN3AWgMeZO4a21g5ER+yeLz7G1vJsvGB1bJF3bKdbozlkYxuJR1Xn9RPrn6pqLQYDIgaFMSiavn7X3pSQfXdvtx09p3u6l60zND7dGnu4B89e7uXW90ElAAN97z1G3ZB5gonVNiWfiVoyUZ3hLW8eZiGYNe1Y7fK0K1mzmDn3eqz1HHOZU1VYSDNoZ0o7xnS7lsB6SyF4CyLwUrsuSTOtvWYsFAYu3KJVCyjw17cvMAKcTifnuZAQgP3f1MTVHBHXfZf6TDrDgQ8lAje3JMGbgfvH6NeZTGbDEHVBMplVUFjACowUUC3x6eUJIgX+9eMnKlGIurDPVSJMuYjAQBolrh8yunts6nX82R4eSeb5EgaWjLfPf58QiOv/up8718JRPRfpMY1adYW54/074KXb7grs/++IrunDSJv4C1VIvXjT1N7BIgJjTOd58veUBeDl7Ole9kAWgbnYy30At70XOgEY6IcQtGR5yT65xG8f0Ia148lc9nRjhjXtmo7wynaMwZ52jIQCBFtjf8t0u/Tc3HnKaxsAWPUTJFz7IDGnWywELuGWe7zEfU1G7ukJLOJEX+8J7P9aa6HW4unxAc/PR1RV1Xg3DHk5TLXt3G/jr4dCL6Y9LbjzdhIzV8EyLprNZDJ9iLoRmcymcH0PrfcwdX2QhweD49MRP3/9wVtZwrreeuc3IX5f4JTIGz+bZ4Xggc+3RBaA5zGnfzl+rBNew0Pi+Y1m/16txVpNR3gZG3M1H010yIlLg9eRR+dewvldcc+nGIERwUEKGCOrtXtbfz5D9nQveyFeoLxV9lS29nIvt7iPjgBcVfMmcm5J2IjZ8Itzjc8dYZ9cYravXfnGUd48zGnGztQwtPd+brcwqGEtd2vb5euJeAVpHFqIzat1yMNwiY3XyIO4zVw7X8dID1w58lf9SuxghfTadsXpxdA/ist8PCmxJD9vJQRfw7Z7cBePYGuBQPwVoNlLr6qqOrScxdevn1EUBd5eX1EYA6va5LOINJNU4x6+Q0Jw6OHaeiXE4ReHzj20/xgfCe+PQVPPePpOOzyTyVwJwiYik/k4uIGgi1ZiDLQCVEo8SoFPn17w/fcfvJ5OTV+96V0ED64BnEAsQDx1v0QIlvoHW60aml4XbZ9pGwylXzy2OO8hPH6NxjlKA/FYMDj/0L53c68yaMNwCbao4P1/Fd17Gro/PybwRhYHgRiDg5i6L4/mQYuv7M553q45MI7vljAWAWkv97g19iICA/t/TtiJn+Nr34doXUO/vZ1QWXuTi1yKre3yITAYJjg97B0lZvuYw0Izpxs7Y2m3ZseEXQhmLnPr2jZcXlKhRllIrcSdax+zN/EtSQ1amfLWWguLdm8xBtuuUd5ujfe4XZqft/QG9v1MtrIWcg1P6kt+Z611NgggsPjr6zcAire390Y0ttaiMGZSW9sRjUdtTOdJarC9nbDQQwJwd4LaYhjiLnQms3lIm4FMJjOCADgeH2GM4H++/wTEoCwtHg6miXDSOV4EAoGJz5MQgc9FMNmDAJD3Ar4esRg71g8fms9oRdvz16gQRDWqdVdJeBaPnQMaPwnjxy/th/pzFGJgCne/D/WzGQvJ7u2AuDxy7qkLRbdIFoD52EP979lLGcr3MXA+LwC/v5coq+omF7kUX5FbnblZ/Z3gm1hqYfamQrCvIiPM+coOa9qxi8AAd9qtw/m6ijXNgPm23fNe9pRu98aq+wdw9JnCAe8lIuut8bZdSrxv26WMhU5j4lL7LkmzcELKWguFxcvxGU+Pj7BliUrbsNCpa4Xe4GN2pW1MT+gMeQNvMiR0JABfeFgm86EgbO4ymcydCPvARgQPDw84GMH3X79RqaKyFmKk3qu03zcxPT9g/90Zz9+JfZKtkAXg2zE1Ut607xN9XHcAALd40Gq7hHBy/7fuYC4pBs0c24Ti78eoqs5NX0Qg9RjCGFPvHyyAERQiEGjrFh0KxPUyyf4SjmG2/Hym2Nv9bJmt1/8he7kPYD/3cq37aAXgOgQ0wOlNBaQnDr1X8NqwTyxx25eF4L3CmnbNZP/KdqRgTTPgHrYtr8znrLS9N9MHdOuw9vXHYLfNKqCJvcTWhv15uFa+XjtEzjVtuyVreAOH1/bevlVVQQD853/8hbIsUZZl893Y6vvY/lQYuXN2+q/niMD+GqvSXD6LvZnMEERNViaT2QDdsLOKx0OBx+MTvn//iTKxADzuPxpN9yldn2dY+N2TCJwF4PswVQweWjg5FHLZH2qjXXzPReebZLMV9GfOJng1Jw7r3Zu2h8SRrEQERSE4GAMj4qIQjSzynPrsbfUZTbGne9kDZkf5sZeyle8jOEfoAewb/aHV+wzEk3MsArBn9YmlEZhtAwytfax2bQHmtGMWgQHetLutXcsqc/aQwZ6hQRwDLHbEsNrlYfMEDgm9Wplsu7a37TVF4OwJPO23vi9eFAXKsoSq4vHxgC+fXvD+fkoKw
FMnZ6blZ39f4KHjh+xYhTlevp7ocO/TMd3fIZO5N8NeSiFEzVImk9k4bV9Xmu0oRBSfP7/g9+9X/H59g54Rbk3kIdxuVTHu/bsXETgLwPdjyuLwlMib+q77vcCqwiZmuOb0f68WMjr8r7afavsxwjdD94x6U29TGIgYFEWB4uA8/jGy6HSMLT6jKfZyH3thq/V/ir3ch2cv93PRPE4oAFdB5RlPgjElVigCxwKwYsrUym3Z+8ThdZHOXy7burBOprPDnqe81vGm3e3sukwAjtsvNuLB27lj713PsKYbwGmbs0kAL4hBqPpMjGkGpBdsXKu8D4WGnpov4XFhX5MpXz3XyN8p6dHfj6vroesnd8qyhAHw+dMnPDwUOJ3KzoLS2F5jTDJ9w2PDa3U/a3v74S3EQrAmPgvv5TYE4lfyEgOeydMOmyitZfYOUzkgqhYzmcwHR2tP3rpXDlHF8/MzKlvi9+83lFr3DRL1ljHGTd4nFna6l9L73ZAQLPWPtlQ9KnjHDh+FS0Ta0LlBE8eoNtLryJxJt3cxao9K19d4bPViaE/dje96Lgc/FoXUESLb/ryEVjUXdE7BbehoY5wonHq+h8Zy7UIPGT2Ona3Zu3eyJzAfe7kPYPm9DArAHlZvYD8xZ7nM6sDcgeKwTTDaOSCE1a4twJp2zIMdVrs8y+y7T6XNlnZDgzRGsm3LqJRLAPYwp9mtPG6nhhEeIxSmmReA3UMInnqOqqpgrcXDocCXL59RlWXjDRyPJ2IxeUgMPn/d/n0Mib5jiwOuzkRv34WHZzItYRFeUIbIqrRMJpO5iHgxn4jg+PSIojD45/tP2Lq/4vcabR0T3bEGaPYL7i986/cDR72D3Qebad7ZF8d/NKb2UWOh1/21QODVbuu87Ym20u2bh+eKF3BO7jOrm+edcnx6LGixJNaNqqKow0Ubg0YUbu+jfobhF4IMe/L7tDBmOzF32MaoH53sDczHXu4DWHYvZwXgrUwcVomwEQywpt/97RoWe4fIabcv2NONPewRa/rNt+vjCcBDYZuYbAxhtQvgti3cD5gJ5jQD+gP/a3oDh3/Da0w5/5Cn8tTf34tr5e81wkL7c1S2gqri6eERnz894+3tFda6kNFVVfXE3fB1LBRfIgL3JmUVgEmf72rPSXOaM97VqQ95ilWGndBZJvw8VYZyucpkMh+QeBHfw6HAy/EJP37+wp+T64u42M91vyGoVw2cJ2HcJ53rCeyO5uo3jpEFYD7m9k9DATj0ok2dy+8XHEY+mqoHjHsH+z9D0XCGz7XEC7pzj+2OwnUkAPfcFrUYfDBFMMbonq8bcQi1XryNZxfYTj3zkZANLQA6x17K117uA5h/L2cFYGAbk4e+s8K6gp41DW9v13zhN+bjpt3+YE4zZk/gEFYbh+1at0JeI73mD9Q48xTIti3B22Xr8FUehs4ma5oB/VXn12KKF+gQ8YIN9vS7BnPLqU+j8HeVKqAKtRaHwuDl5RlGBKfTCcaYUW9g/z625Zxd/QnaYU/gsXMuSseJom/nJ+tXBxkGEsUtWTQSDijxcby1UyaTydyPZDteT8IfDHA8PuH3n3e8vZ/cXqm1F3As1BZI90H6/cr0MR3v4cTvWGFfFP+RmbuQ3NrxRejxe1Vtnon4ulNsm4S24+PU2GrsORm7RvNdo9wCgNYirnYXzomgqD2EpQ4bbWrPYXearhf0Ftmq3XtlS23AOfZwH3u4B8+ce5kkAHsoJ7607rSFjRZpXlKmH25t13VCZnzMtNs+Y55cNGmnrpRa/164Bz406Zagb9v6lfG90+uS6zHmLaNNIaz2ebsqnRbG9p4wplnKpluJwdcIC83KNW2bK5R7/D5cZVk2n1VVhZfjI47HI97f33sTo/G5hsTgad7APrBbesJWeseO388kzqw+zWLvzgmLS8cbvX/ooqJwZn0db42UyWQy9ycWuny/wEciUWvx+dMzTu8Vfv75g0oVYtp+SV2oOvoAACAASURBVPjbog6hO7yYMPYQ7h8XnncL3YGtLIrPdElHLup8AhGTPDb8LLUty9j8+jlRdiiMdFd8dp7Cs7vfZ34wFrlJ/bIP7/lsFIV5dF7ChaBoQj/zRX8CxudZPWw2f0S2uhBojD3cg2cv9zL1Pi4SgK8Vpu8itF2h1DRQvsECGPSHBuaO1HVsu23csWuHh7wWzPm6NufyiiLtEpqlhnUIIRTpliAeLAAc4leq7QKmN5Tnjr9WfjDmK6NNIcz2+cgkgAuxJSiaz9d8Lhj7cmOTDtdkTljhIUIRmCHtQoYmdJbauLSOVG33zFJV1y9XharFp0/PeDw84PT+DjGmSc8p3sBzBeA4TKO3Kz7LtHB33j8IA52DdnCviA7jKSK7RzGt73ZJfKKx38ngmwvQ/nJai7ywIJPJZFKkPQpdG970DlTx/PwEgeDnj18oVVGJwEhXtFVVGBFABKYWi/w5w/MOLT7r2rANASALwNsmzDvnAdz3Zk1l75gnbjs31orKGvWvY6G3/gZxZ2is3x/+XC16vxO3l4u3sD4+OjBBUnQW9GxD7f0bLtbwHsLF4eD2Fa6/a5aABHrEMLb+b12PXIGp4zv2+mbvxPmUWpC8VfZStobmILbGpPpgjgAcwtwp8I2KgndwzJp+y+26ZCplOqzpBnDbxgxzuoUCDhPMaQaEQrB7z9SQMou1W8lXNuJVxUzlDXA2VdB6UGko7EutxAb4ntVbl7mlgjC7JzBw/ed1aejsdkJIYG0FtRYPDwe8vBxhK7tYTJ+/6l17g+6xSVpvU/Dz8Kiz9rEtQs3Mx83n15P6oWfvWgYFcNc+mUwmswHU4vHxAY+Pbl/gU1XVi4jq/oI6scfXt35f4JiUN/Cwx/A2RGDmaGiZ6QyPV8K9buN+b1e0HVqYqRKPJ5HsII15/6aOaT5L6LoKC2grAPtPhxgaC80dc8RenMYARVE0IaT9THgoZItE15Hadj8l0CwemWzGItjrmo/IFtqAqezpPhjnEedydn5kqQAM8E4GA21j12s3SPKTOe2AufZdJ8zzFPaVbhkPe7oxD4KY0845fdkm3BATS9PtXunNmq+sdgHctlkA1o9k64Hr2h3MIW9RYH3bPLf0Bg6ZGxp6SEBn45Ze1At+CRFBVZWo1I03Pr0c8fTwiLfXV4gIDqZAaasoLNzwRNGUfAsnZuNzzRKCE4ruiCNw5iaEKT6Q0Jr4RiY4ZxDDW8NkMpnMdlFVFHUkks+fX/Dz5y/8OZUQI7DWef6KqlN7agqIa1MCAc10QsW2gs45ETh1DAusi+Az8xgb63Vj1qRIefQOX0MRzL2PdNemRKIava562y4b5yz7rQGgsLYCUD+/4j4tjDSisDF+wbcX4N1rv4Ckfbru9/yz1jUfEed8vo0tAc6xp3L1Ee5ltwIw0F/x1ATAIMnXLaRfn/WFnG2mW+YczOnGHAqJza70KlG+sBpz0m2NNGbL15Bs23KsKio7X3C8B030FNVgMouDW3vcjk3SnYPdG3hNT+Du7wystSgKg7KeOLGVE3u/fvkEUXT2DR4TgePPzoXnD8tz
eOhUAdidC715MqYxxaaZWERd6D//+uMkPG/tkslkMtvGWouiFmuOxye8lxX+/PnTcSJJtTcHMYOL0Tr9jI16AmcBeB+cGwOcF4jrV1Gf/Ny5WlEYvVDR4fnGznl2/KLd/lHH7oGfhtdfMj7q/D5SuQXtmFDERQsQERQHaURhg+DexdUyau83J8Ba33xUwlDjW2cvZWsv9wEMzG9cIgCHsE5+xd4jFqCarGFNN4dvGEMbE5PCifBo94A57ZhtY4Y53dgHQtxp5/9yCMBTO/8sabqmHUN5xpI2QywNK3tr/OpotaDyjI/7SkxpBtzPE9gzNTR0ahU7I/f2BB5azW9qTxtf91ZQqLWwVYVPL884Pj3BVhWqKlhZf4FNaaG4LyKKJvJb4nvQQAT2K/hBNabYAtL8J/E5Je0itnteMZPJZDK3R0Rg6zlQIwafnx/xVlb4+duJwEMir9F2cVm/r9E9f/g3fg0gGVZ6bZgXv2fmMTcf48X7QFoAjkXcKeJuE6HzTJEPhdRxO1NfuAuEIrTvWM0Z56Y9lbvnG0sXbX/YflcLwYUAB2NgTB1RIAvAu+VcOWZeCDSXLd/HnOhiW+GmAjDgVtCFF2KaePWNCJsADDB3rtrJaV4buW0D+O1jhDnNtiwErynutAMDvkY1tWKVCUabPMy2AcMDUgasKrTZNYgP36cDuNLunh63cz2Dm8WGUX+YhXsLwVN/W1mLsiqhqvivv75BRPD29gZjjNuCQdzee6jb33BCNX7Gp4eETr839fMYfiqo1zrWH1ruam+EVNosu5lUMnOV9u2x2WKVyWQyK3LNvpa1Fi/PR4gA37//hIWBNQpYbfoYKoAPSFEkxrVD3sD977i9gdnnPDLTuEbfPzzFlHH1lPF3eIwdKfrn5rbOeSenFv6rKtq9hKff07WI08cY0wkffTDhtWy9FNGEJ0D940Hb23m3vv1skb4ywdh2ZTuuAVtbtpQ93sfVBeB4xQ9Tollr3Wb1axuSgHMSPQvA14DdPlaY0415QMSdbtnDcCnZrmUw2+frESXYWiFFKAADHM/sWp7AQ+9jQvvY+sAeRhFYoW5BhCqstXg8HPDly2eU7+9tmlrrAqyZ/iLTVLsyRwju5TPgAttEHsDir13/h7d2STBg7OSsiyexLzImE7KpcpTJZDJEXLufZUVgrMXDQ4HHx0f8/PkH7+UJGgi/hZhaknGewEOLz1IewbHNzCIw83xHZjrX6ve3p/EvhqPuzLmm1gs9w3OoohM6Or5G+H4o+k/Krs5nCQF4sr03oLHTL0Q1AmN8+Gj3WqSVgc+Ng0avsSMvx72QBWA+9nIfQBj17IoCsId5whUAqth7hCxf10m/aV5IrHk7paFfiykdlUwa1vIGbCM0Eot9cZnXMHbOHSvgvh399GFJsxTZtmUw2wY4b0Jl64jUsHlQpwa7a4jBc4RgRm5p15Jy4j1qnABcLz5Qi69fPqEwBrYsmybDRtORcz1+x74fFfxVm0nctgyi3gNs/WdjCmYbZu4Wztogk8lk7g9DnzKFFYFRharFoTA4Ho/48+cVv1/foIWBKCCq0EBAkWDBmEi7p2NvEdmQ8EsqAndC6GY2zS37/X1huPv5OQ/d/vlar+B4PJV6NpaIt0PzsVqvfdbOKs++wHxOdJ5jT1rEDT9rzy0SCMMiKEzRhJB3Xr1eRO7OVY6GICapbzKOsA3ZA3spX1u/j5sKwB62iUNPHD5wLOTEGjALwADvhKbnXMO+Fuzpxgxr2nmraO0jtQuIbbvPczolTFB4LCvZtmUw21Zp36uRAeY088SeyrdmqhDMLgIDnEKwFAeU5Tu0srDWojgU+OvrZ5SnElVVoSiKTsQh/8yM1e1TReAxD57w2RTptlpWbyQCa+KsMsNrN0MFb02QyWQy14GpD7sEa23Tz/DCyqdPz3h/L/Hj969AXknfqzEGYfTW9pjpAvDQudfCEvdjM9O513gkvI7rO6cjXcXHecZE3lAUnmPH4uMUAMxFY7pzv4mdEzr6idj6c8BI0XyvAtiqgsB7BSuKwuBweIQIGs9hkb5AH881MNU1GceeROC9lK/d3MetBOApq3XWIOWNySYAA7duoKeLvUOwTWjG5YzNvhBm21hhTTN2ARjgtW0tAfiWx98TVttY7fKw2qcKWAidAAzwppknXtR3D+YMnO8tUM/l0rRTIBkeeMrQNVneRSFw4RWtVVSVBaTE55dPeH58cnsDizQTkmGoOS8EpxYCju1BFgrIfkxQ9G5K4CwL+5sarLIfvk9JfedchrofDZ9i9FypMHkZPrhr0kwmk+nC1h+9B4oKgGkj86iiEMXz0xNUBf/8/oUSLvRz3H/wxPsChyJw9/3wa0OU9qo5FPQeuOdYqb3W+YWywwckPosEYEUbGWhsDD0U9W1K+Oj+MWg63ku8j4c+716nW2fEx7vL13P66r2VLVRMMyARcVvXHOA8ht0ew5Kssz5iXc9OFoH52MN93NQDOGTMG3jtCU8/OWfd0prV7Ehx3Yb6NvsMsk4Os9oFTOtcZNKw5ms4OGLKT9b08nTFm9usRrxY4CBOQ1bbWO3yMNtnrYWt22uWeiTEWjs46bUmzaroO9e/Y+kQCpFsCyJjpjwTKbH3HGbmD2LR1loLq8CpKvH48Ihvn59hqxLWKtRaJ8wacXrqwPn8ucK/Q3gBeWyCRKCUz0CGB94WJpPJfERyW7Uc394/PT7CGIMfP36g1HoJmAKmABBtB+G88iQQY8LvponAAEdI6M4cB7ay6UUmhmXsG5sR2xX2w+Njhpx+wsWfFtMWLEwa90z1Irb9+7LoL4aVEctu4ajgxeI43Q6FgTEGhTGQ2lNYxKAQGbDRAnWdF26D485ZXwvtONHAItxjObOMdty5bdZuw67FHu7jbgIwgFUm5qYwt7FYg+s02LerhFk6FDGsdgEfw7ZbPOvU6QZe+1jtAqatEl1+Tq5z3QJm+1htY7XLU2m9oFe13tOHB2Yxc01v4I8QDnqJABx6rC4LC+0nMAzKsoItT/j85QXPj0+oqqpZkBCe3783xvTuayi0XHhs+MwlQzxyFfsMIbxPeiaT2Rpsfa2PiKrzCX58fMTT0wHff/zG+/sJUhhU1gJGOgvevPdWvAguFRJ6bPsJYH0ROHsA7wPWMciwRtAXgON+/rnxqP++SpTgOGLQHHunHSiNSLo07cd+t+ScbXoVXU9qVVhUeCgOzlO4AB4OB5iiAAAYn06oOudRVRg5NOdp8gcVoN0xWG7HlsMUEeIS9lQGtnovWQCu8R2bnmcmQLHkYlmjMdyIXhvWDgWQbVsKs23MNB2ile0YgjlfnWkK1csa1VvcI3O6Adz2sdrGapfHqtZRncIJouGQUPeENe3WFlrDybxzob4YSfaBY+ZWzZ0V4tPCWfUmdMRC1UChUBVU1QkPRYFPL88uHHRZoXbE6XkQx/e1dD/g9AQtxRAhszK8T3Qmk9kKjPNjGYdfJGatdaKuMTgeH/H+XuLnr1+AMVApIOjPpQrETd4H/Rq/hYR/3xw70u9YOxxo3gd4HzCPQTz9SIlAOD/
kxxNj46zkYs/wv9qGjp5iw1ybh4/x7rLBoopoH5cxvSQWbefYNt7GCLSul6yt3OJdPx4yQCHOY9gY1HsLu5DSIhIs9O2PwXO7dj2yCMzFVu/jrgIwwN3opCpUC1DN7kxPv3Ci+H5sJX+ZYLXLw24fI526ZEU7UrDnZyvguPdT26R73Rdz+rHaxmqXh9k+VXWrllUg4r0S24mjNZm6+noN1vAETgmP545npNN+3SJLdZ43i7qYY41XbmVdGLKqqqBQvDwfcXx8hK0qWLXN5OrcBaf++GnCbxsKeuiYzH7hfHIzmQwLuU3YH3GfwojgeHxCVVb4/utXLZ60xNtJmEQ0lLiYDG0rEYYBXatsZS/gfcA69jhHym6NnqmpC2/9M2Tr4bQfZwCYFA90rvB69jjbP37KPJjfxnIR4rdzMu5abqqhEXNFBBC/8LY+NlqUbsSFji6KAmIExgvFAkgTSSkQg5dbm8H6kSCuzV7uZWv3cXcB2MPc+IQTh2wCsOd8+q0XMpI9b1lhtY3Vri3APFhizNd2cC1NWM8p4W/veS+M6RbCah+rXR5G+5r6w6AZnK3ZtqdgTDfPGiJwSGp/qhD2tANuKwADU4Vy7/mi/g0gLuSiAijLdxRi8Ne3r4BaVFU1eO5Br4Do8zGPnO5nursBeeY8vE9uJpNZSrKuTxyXn/+Ph7UWRVE4D2BxY1RjDAyA56dHVAD+/c93oA6XCrRirhdpDmI6IlVKBJ4iAIfvb44v7MI9p5GZB+P4IxWx59xCzqHb0FpRTYWUPnfvVi7zsr3KcfV4/9w4drrXcZ+2LmqdxlSrTpopBBCBiAYCMBIN4wGqVX0+C1Onc1EUKA7OU9gYgweyLa22yJ7GnFu4jymLybdwHyGrC8CMniNAO3HIKgADYYW+jrfvEIydihBm+5htA/jtY4Q5JDR7frp6eG0r0jCn3dQBwRrtLnO6Ad2+CVu/xKq6EGwiEEIhmDXk09pCMDCeHmvbNkYcHs1PFVwbmRgaOsb10xVV5cKVHY+PeHl5QXl6h6qFkYObCBIXgtHaKvCkn2DXmXCMzog6TaTvzZPZJ7xPbCazPVLPU1yVDj1zc6vcfnjeYfJznpmCAHh4eMDhYPDP95+wACpVmFpgiRcyi7i9gnvS1IAgnHptIrHsHn3uLADvB+ZxxyXE+gKA0T7/nPmSRhuY8KiNeSJ3BNYZ+dAc2+yr6wVchROLLYD+3EV4zWvkeysAj4+l0nkhtWqhMIWBEUFhBGIMjJFmUU3qPP739as24tOSO5Da3ZlIP5nL3sNBM87DjbEpW9cSgD2sk62haDO2P8DatJUi12QwwN+5YLaP1TZWu7ZAvHdOuHxjbZjztenvEtrIaJNnC7YxLwBjwU+4hJ6YTs/iavNTq6VZ8nXJSu5bMuSByojvCd/DOtFlZUbr1fqlWqh1K8///vwZRgxO1anjcWOMWZzWk/bpUyAvcN8XnE9mJvPxmOqhu5T8rGfm0ohD1uLp6REPDw/48fMXTlUFq+ltQfxrLwJ3ReL+fOhgdBLcv5+d9wHeB6xjjmvTerjGnw2HjA4/HxvPxmNLlfE2ZK7YPH4cmoupXknYnerBfOFq1/Y69V8fxtrXi8aJwsYYmNpzuGjC6ftDtbMQfko+hfuuc8y+Xs7a+8JfE5Y5o0vZwn2sLgB7WBuiUAjWO02CzYXVExjgzVcPs32MtjHatBXilbNMXRDufG1XXQK8i4XYYLXLw2zfmrbZYHwy1OPojp3S4eLWIDVgZnle4xXcazK22pURBYB6cuPmFxI0fjGjnlnhivb6M78avKoqVFWF4/GIv759wevvPzDuxE3YxqVMDRMtwtG2Z5bD+TRmMvsjWVfeWOxNkZ/5zBJ8f8Rai8eHAs/PR/z48Qev72+dFWGhF54P9Vyg2092n/evMdbfuKcnWBaA9wPrmOPadG9zXiyJVBqNio0IxsE6vpXk3PQfXswsUDtdPJ51LelrC7cuNS7t2hkQVQWsC0NtjEFRuLrz4bGAiOAgznvYzRfa3rlSacIwL3Et9uIJDOwnX9jvg0YABrbhFeTCzYFuZofZExjg7mRk2+bDatcWUPCmH5td4QrH1heNE7a087Da5WG27562Nf0fM23wFoaC8rD0mbbSl2NgU97AfkB+h+wUcUvp517KD/ZPVYnSVjAAvn35iofC4HR6h5hU4MUFtiXf+8UYbf4ZrqKfmQjnE5jJ8DC1amNr/2Pys55ZSmqxo4WiEMHL8xGvr+/4/fbWKWPxHsBeBEZ0TH2F6YvOEp/dAuY5jMw8cj4mhNQBj+Dw+G4v/8z5vSSs0fuztkw4d3C8reaPuceu1471EgJwNC83h06d6c+hUxZGt3VizxC/cFiAQuoQ+8Y0//yimmZxjfprzjafljWiQdyKfB+3h0YAHlphw5R4HQHYw2MegLBS5jJsC50MVhtZ7QK4bWMmjCzAxqp52oSzaW2IJ+s7ntRBuB2Jjsn72/ZhFObiNOt49hHYGa+4vaYt7Urh4LMzpxdFV4RTP2AdDhm3BnFZ86xtFzBs25qk9mzyfxnSzNOZzLixWbEf8LnLWT9ZWhuoCpSqqN7f8eXTC56PTyjtCWJD91xp7uVcfZPKCzc/4oXf7gSFxN7MnUZqwg1lbgrP05/JcNJ4JV7znIkH7+bRJRLk5z9zLRrxVxUqgoMAqCxePj3jvazw688rKmsBkWZfYATPlsB9LsG54gg6bejT4b7+PUSALADvh5yPffyjOZQ0XgCuVcfms1nXkHbvXq0vOuUMo4KtbY9JH3e+XujMd3T+xpHGumOd1LzNVBQ6cZTXn+0LPYTjX3bsULcYV4zAFK6uPZgCIi7cdBxG+ZI6NDUnef7okOXXbpYie/s18eWGYJr7uBS2e6ERgGPiCozFLqC1TbHOoOUc7WQwn3HMnY1s2zKYbWOGfRC1hvejTKhQO+I5mQAcXp8RRgHYkxK8mOz07f41bamuOAWpUWgShjTzWGs7k1osMHkCA2kReO26bAjVO4SErlnqDQy4PX/fywqnqoSoxX/+9RVGDKqq9GeHSAFry+Y3U0OXj3njxJ8bd6mWLACvDs+Tn8msg1/A0vvsA5Cf/8wt8X2I4+MDDAT//PqNCoC1FYqiaBdyuskAFEUBSfRzzgnAnWMwfMw1YV7EnpkH0xiIldDjtXE67a3qnCaEDmkb4XzArChgyc8U8ddq59UJGtxoX17tHztW56xZxuJF/AC6eaS2vjs3KHMhpt2+ww9FPX6rvYiNMbC2GrzW/Hq3q9AKrlOn+pDQvj1hjFw7Bca5j6Ww3QutAOxhbZg63sBcSRakGWc4aIA7X5lhto/ZNjbClGJNt1vb5b0AWuFvXgPZCMAfNP2WwmoXMN5RZ+ASb+A2ckgwMLvirYWDO6Y0Ay5Lt1vC4gkcp0kcboyZZj+4O2SpLAnerBVUClhVJ/pai6eXZ3x+foZ9PzmPZiO99mhOGT0nBLdeyYAIT/n/6HA/WZlMmj3t93YP8nOeuSedEKd13+7x4YCHwuDHz98o1S2K9PtDxP2BQsyAkNvtmwx5At9TBM
7P1j5gH2ewotG4wZN6bs8l8dDYOBynnnM864y1rUK1FTpT9YW13f1yU8c2C1U6x1ynblm73PWv358TaKI4NWGkXR9MjBOEBQJj0ITyb9KuScOxhQDt4p4gKMRVuEc0iHuwh3vwMN1LTwAGuAzciseSh0kQZvYEBtav+MfIts2H1S42hlKJOf1uYlu/3zmbjhc1ZzVHm6+sdgF9z0ymdh9YlnbX9PYdRSVYocyXbmsJwOfyjO15GAoJzYjVhe65C3A92ukXM1BYRTNxoqo42QooK/zX33/D2sqFjg6Sd2n5HPPMad/Xfy+4TmY+vE9P5qOShdzbk5/7zFq0fTbn1/V0KPDw+IAfv15RliUg3WFwKOCaXsjV9pj44zX3BbbE/dLMdJjHF1siTsfhdO0/37NDJ9fH22BP4VaXkE7jd1GI5olj59T5VUzv2Ln10b3KpqvLnArbjDEn3ruK85E2xjR1t4iFiN+HWOow/3WdbFy6pNz0bK3dNFGcL5gz2dM4M9/HdaEXgIHuZDCzbQCXAAz4uovIoAjmTke2bRnMtq3NuZRhTruLbNPo7xXRWnxgTTnWPGW1y8MsAp9LOz+ps8r6eG1X5zKlGcAtAM897p4sHbjfC713Sdf5A9uq8mEXFdbWIrACx8cH/Me3r3h7e2tWw08NAZ0al4ztz9e+r3838x4yy+F8cjJ7ZOoTnZ/925Of+8w9GZqr9P2PQ1Hg5ekR33//wdvbO6ykPXoFApM4T6f/cMYTuD3X7eqaLADvA9axxZZJRTQLewf9ZzL2FFbEvYlU/RKOvlQBVYtK61WvZyKBXkMMHtNnQkfh9qfL6qKblVFx/1HrBOAxL+zkz0Ua8bizONmLw81b6yJAwXkPi9R1vHF7EhdFATGm9ipGLUTXZ7CXaWDxHsdbZS99Zob7EHXgVFawtl9ZMRg59OAx2egnjizaypjBLg+zEMzW8bhkpdQ9ybbxMzcVmNNtkW31grpb47tZQUQVKraar2uKdWEdzBQ22NMb3Ik0bf+qua2JlwqI8GwJkYqecqv8vfZq5zVh7pe0a3zu5ekOQM6HhW7Caqnfs7jdZ6lUC1uWMCL49OkFx8MDTqcTxPjfBUJw4+WczoNYBA7HJyLSmcoJJ3G9RWTV26bhezoyW2HuY8jWL9krTYh+oJNJ+VnPMNKdn3Stv4UrugcBjscj3t5P+PnrN2AK15fx5dq6vooR7yUoTSw/fz5VwJhh4fdceOhrEQrApMPvzAQYxxT7Rnpp3n3fF4yHSM2XhNv9qg3O7z9XN1aTWiAOxyzDNp2nd0+dd74OkkS73RW/w3FkPO699l7D4S8W118Juyf9LDmOtK0AXN9rUbcDxphaMDYQ2GZexxjfgLjz1bnavG/ajunFqnYmQGcNgSxMrPQcXj8MpDQdvGCBQ/CzvfW317yfjgDcPHjEDUHsFQRwFIhUKAY2rhWz/1awljtWuzzM9jHbdkuucdesaXc+HIz7K8DdZ2dUg/qXsLpjzVOA1zZWuzzevruFeJ6BtqMpiBTrGhNwDwH40nLDWu6YRWAATZjleyI6rfy0CyLaRspai8paWGvxcnzCl0+f8P7+3oRaVHXHFEUBtbYXas1H/TG1iCtoReOUENyxu4nxBeS9ga8H55ORYSEZWnUFOzLTaQTgKKPys57ZIgLF8XgEAPz7n++AKVDZynmFGdPrNxi/cC2YSJfExPiYN/AtPIHDMXcWgLcL63jiIzAuvI55DDustXVdIEkBePS6QYPqfjutjrBnIsbGgnLqftrvAC8GNuPb4Nh2WX2gXrdXOmtr91pc5fwSe9RWXU9hAaRwY1EjxonGdXsi4j7zHsdxGgZDUW9Y18s81t3CoxNhvp1ZZ/Km46zQXRDdXqVdtDA2lt4ya93L5gVglkLQTGhCr7G15c3IIvAyWO3yMNvHbNut2LMADIzb5lcbrlXTNHUxaVXHmq+sdgHoDSRY2n2PmwQhbvsVOBcK6p7co6xd4xpsz0RKWGTDL+C9q33BwvZJkxe1bX4PTlVFVVWo1EIF+PvzVzwcCpTlO7SyMIcDqqoCjNtfW93y+frS2pt8NRp5D0d29V4r4H2EiRz1NwPnk5C5N3lP3Y9Ffu4zW0XV7Rn59PQIYwTfv3+HNQeoALa0KArjwn6atv9QIO5TtOebGg762nVkFoD3A+uY4qPRzYdpHsCoxyGq9XhmQeuoPoSezltknHLKO+8oosn3o969QGJsuWzAtIWyPsdG792t8Ivl+rWx1N+pjowhGQAAIABJREFUVo13sQ83LQVqkbgVjAtTNIuGfAjp2Hd7ioWq6oRnF7KrwSZ+rHEYbW/4AGxzgUugEoA9sZcGU0KzCsEhtvYW8JUWE23acc02MVfKzLZ5mG1ktu1SbnVn7GnWaSMU/YZ7RSzp9NBW8tS/ZmpbmfskgCtzzHtiuee0bfPXSr84/xaFbbpjOrM+s9RCcLMoEnfv/54NC+2/tG4cYeqV0qWtYNUCChwfHvHy6QitnHfw0Kr61KRnkVi1P21v4FDIHr/HTAvnE5BZSvbQzaTIz3lmL1goDARQW4vABv/8+AmrgBjjoo4Y0/XT8pP20YOQEoS7n/f7GtesT70InAXgbcM6lsi0TI3Ad04AnifuSq/xtX7O78xA5dx1Lgk9fU547jqBXKZ1rPlsDF071sCW+kW7xUjxLwQqtjmX36ao2ZJAfHuEZq/69l/dFjWeyQIDE3kNn7MxXFY0nnds84BLufd9jArAQFdoZUnkVFgBFttCmlAMwGq9orH0cd9xCcAhrJ0RVrs8zPYx2zaVSSue4Dofotd59EPhi62u8/Wc+N6W+tb//v6QTd4EScRc5rJt0wnLPpttHkW9DQSpfQCggYq1dl1yaTplEXgbrLEYJxUWWup/VbyKXF1YLFOvnn8rT87b1yr+4+s3qC0hIjjZEmf30lZAAk/t5tqJ173nryMC601CNm6d/BRug+yNm7kmQ899Fp0yW8V7ASuAh4cCn56e8K/vP/FWlS6sZqIfAbgFZqHYYUy4qLN/neT2E7he3yILwPshj3O2Rz/PBNXCeYjUb0S8sNyXGP38n1tIe+hoDq3G0LjCJQXfKSJxeFxKAwqPaz6XwHiNzgU/wro+/TS4/HxDWk5qXiy+buo3/c/7ZUhQ53ug/4dlQX3Uqvr49lQWvjXwocmhtQdxvdWRmEg0Nn5xk4H4qFq1yKzSLqoe2grEjTd49cA53Mv+RgAuKwub8Mde4qZ/a4YKOYNtIa0HhDZhGVwIufvZeV4A9vipMR5YOyKsdnmY7Zviwcf2HMdMFYA917oL1qgHjU0KqNVemI9b4Jv6ZF5E12Z+HgBu+xpxn6CsbUUAdi+0mRSx4Eg/T5N0CviHZW1PYG/DlP4c40pcdtZ+hv2zANV6fVD7HN/MrnrgmvIG1vCA+nUzwAUAEVhrUVoXGvrl+Qlfn5/x+v4GFAZa2ea8wWPUltnIA7gJDR18hsTr+oPuSmmIjwK5a0T7g3rPNp+6fTBa9MJyfHNLMh+NqWOtXPYyW8T3f3y/4VAUOD494vfrH/z+8w6YA
wQuXGfbL2j7KE25l3aP4OCj4PVwf+MaC3Wa/l1m82x1jJPpogDUhktv5YKFuBo4CHZGSr2Ldq4Q1gv+BAJATWIuJ15Yay8SsMd+G3vLNp+jtb/9vXQr08m0Z7vFQvfU2PmyufOEjX5AljydBv/tHjR09WQ6SPQmahOlHsN3jgvF/Ho+oWkbm/G2QEQ7n4kIDv7IwDtZYN1fL1YnhORu2xotNFhCsgBatKJ5PXeh6bLa2CQC1XYv6OSl/AIN033GRGvrT2WVFIDDE4R/mSY0ga59jLY1K+S4TAPg045PAPYwd0iybctgtu0ca1pOnW5aL3Sx2msgb1EnNykx4dTU6QZu+7Jty2gigHA2q/Ve3esKwEOkFh4w5TWTLVuhXRCJu3c16+Hg6DHx5IiIwKrCWuBUnvBgBF8/fUJhBO9lCTX1oLReXT4majcDUB0eSJ4NEY2F8xA7ID9t81haTNjagczHIT/jmY+OtRaHwuB4fMLr2wl/Xt8QBtYyiPcA7vYfjMafoXdc73eJ75eQReB9kMc2+8FaJyh5UvqD1uMXv/fq1PHZlHIyNPcXRiFLjfXPec6eFXcHBNNztvrfdz53n/aODQ9zEsr8OjS+5pCN+Zk8TzqNzgnO9Ue9bdE0ys6+EO6Pa177xebBwoF+2Q8XT6SXIPTbar92Il0GwvI+dFz4eZhMkwVg90PuQshsn+sc1eGgYzMJxtxZBF5Otm0Z1Is21jbgDLT5qgq3hWK7p8Q18neO2DtuHmm61TB53MawPq+x1yiTbQBQhZ77XKbVAzHF2bC2KxFHPWCC1S4P82LNtURgL9Qmc076eeoGV27QVFUVqtM7Pr8c8fL5E17f3gC48ItuoqX9zaANIm6/JMSDvGlC8EfwBOZ+qvi51oR+JnNP8nOf+ej4iVojik+fXvD6+o4ff17dlxKExhzAGNOEygSGvYDjz67VZljyPnFmGuxjm8w0Ol6sk45Dx1Ft6ZzPJeWnI2YlFOsp49qxyGJTrz+Xriesm1dplsSEp1xYzy4RtWedP/HZHkcR4TKlVnhPzX+d38JQTCTcD0xseE/kISF2KKFVzy9cd7/v5t7QwoKeY9YcATh1AUbYJ6qHKtu1YReBAd5yx2oXkG2bC59FaRjTDghEm6mN19i5wjdXqpZY0w3gtg3gF6gZbWP3BAYAJDvA65MF4OXEg2AmmonCe5ql7UTnuT53WO6MMRAxsFZR2hJWLbSs8F9//w1UFd5t1YSh8r89JwIXyUHieVF4z17A3E/T/WmyOYdZznwA8vOf+ej4voOqQqB4fj7CquLHj1+o0IoZcShHj4jA1OGg28/6xyRf4/J+YhaA9wH72CYzjVBMXfJsh1GbzoWPPjfuCUWoad7D0ukUXLLQf4nH8JTfzr6+YGDf4fS9zE+zC4T3pGfs4tPRkpoXSaXblPKlGi78Bob1s/a4Viw2dXnwJ0jbOs2OflSQcwsGdi8AMxKLwEwCcIuANQm3kLesMNvHZBuPJedhSjc3w66ABvWHvWwhTmpBlfY/WnZuqrTrkm1bBrNt1ZX2g7kJxAJw6i+boMmUp2Gd2aQXLl+Mc21W8wRWTHKlVbf8Fmpdu2akcJ7A1kKtRVWV+PLpM56ODyjLEtbadvL2TPkUuHCOUz1/u9/pLr084yfoWu38muwxnzKZS+FpLTMZPlq9QPHy9Agrgu+/fqNS18cIt5zwdCOFDIvAtwwHnQXg/cA0pskswfWg3WLWsed6vu+nqjpBeOEWTeEYaeh3Y+OorpDVjmyHPJ6nitOXEe6TXFvRE25Tolz3eACwZwThe7GlOuDc3NCQF2z94aRrjB11Lq1S5d2/UgF6hQfT2+Kp82LxgoLFAnB8QsaJuTHvkbXt9HZZ1Y4AnK6+1oHdI5i1cjpn15peapesoro1a9QlnCVoPszPQuOZWf9v9PgViqOfuGeEPV+B9dvSFFNWdK5WB0ObsNDMaScoVrakC7M3sIfVPus2eQLAJ0oFgbLuztQBU+o3ZVmhLCscTIG//v6Cyr6hqhSiD1CU9aDSpbsBoCMhGE3QMsYi8vkJ22n3ysSt81sT17hkJGO2mMiZDBGcLWMmw40VxfFwwFNxwP/8+InKCNQ6f7xCDAzEbUFh+v2FQtoxd7hnYXxc3Me4pL3LIvA+YB3LZOZxqRfwpdetoL3P7hFOOp5PhpoJ1099vmze4ZynaXzOs2NRcWL3uWv18SOf8yGNp7A4P7wpASkP40lzwMqqSvVRWe5Jfe0F8t4OV5auIAAD3A1FuDdXyNoTYK0HsF7psbw+bb5yewexwWqXh9m+e9nGmwLLYM/TpuN1pnVfQwBmTztGmBeSALzpBrRtPrWNCghZu58F4OVoIAAD9aCEyCfY6rojuqWrw621KMsKAPDycsTz8Rnv768Q8eXVhZCuZ18Gz1NE3sBDdvUnbbfpDbya4H/2gPQR20rdTIYHzhYxk9kKrp1/PBzw8PiAHz9/4a1sF5gJXNjnxnkk2ooiFIHd98GZbxAOuonsktk0rGOZzDxCIVTkvmN6jcZ1jePbNc575vt+/eXqS2vbMPt9+qPiKc4E5767hvexF+3G9Ji0Z3XjHz3pOtdg8J7iZEgJwBOvsZUx2cUC8BVpSsI1BWBPanUDw8REqjAyTFqHdtkVvSDOoddehnBFWDsprHZ5PpJ93Hd6XZjzVVWhVpsOVtLSFasZ9rRjpfHwJmjrQ5jTzA2AlHe1fN3mp0IarUlvVS8hjLYJAGi98ABOEBYRqtDQmxWB1aKqAFtZPBwKfP36gqqqAGsBAay6AZdJ3FzH0xfO8ybOj/Nhous2tf6KIzcdazwJLkRm+spr11+ZzJ7ha/kyGT5mtUMKoPb6PRiDT8cn/PPnFX/eXoFa0EkJtiLtojKj8edpW3rnmGsrsgC8JxjHMpn5+MWoLP3fNkJPt3y54pb6Zub5Lyi38W/VdhfU3MuO3rnQLuRuPYHr83fMWr5wh4E17Og5Hg2ZkDjuXGpfIgDfipsIwEDfS4OmwiEVp0N82lmAaxYHYYXDB0vFNQSrfax2AbcXgMPPeEv2Mujzte79sVUpzOkG8NmXXm3IB7Nt1PsCwz+v7YpThj7TFkRggM8+HwCwGUh6lCcstCWZOpy295S6iVTjAjiXJ0VVnWCtxd/fvsIUAnt6B4yBnTEInOsNnJ60nXatW3KNnMyhlzOZ7fDRxlqZzK37TU30FhFI3ed4Pj7j9+srfv75A3MoBvtwncVlUb/iVvsCK/j6vpll5HzcC0Mer7yMOcW1YZ0xu1Ph560uE4ml7tj0daWW/jhu6JpzbHHHCsKwzo2PXuCs15fR79f7YilrKTumzlmmROElKahNvkR2LDjXtbipABy+ZphUAvgFYBG3j4e11hUYIts8LCJwKv9YKpwhmO1jtg243L6hSQm/MoejVF8X5jxVVcDyCcAelrRLtZ9stoV1MYttKVhts9JfNEfHpL1z7kdvhS5x2jHa5geG2v2AQgTW5u/66XZeBFbXgbB1/QcDiOD9rYJVi8eD4K8vn1BVJaqp10TrKRyHcoxt6r+uR5oKGILo
7Z2ph5SgvfC8i/eNymQyNyVVa/sF7Xsda2X2w9r9nxRWLIwaAAKFooLiCIOH4yPeTiV+/PoFMW18kVQfQdVtFVF4j+HIC3jod8AyARhw22MwpmdmOozjl8xU4rkj4DrLMtenEz1VJoh5I/MXscNi/1ypc4+fp3ucNqeYE031fHjpVlkMBeC+bhkKxoK1Rtd7rEum3NPQ2HRNr+CbCcDNBYKwkEyThx5WT2XAeQWF9jHZBsQrULjYwgIEVlhtnGsX512sA2ueAuH+7BL85bH3fAdsnbqZOU9Z618Pc7vqn4cKfOkGANA2JDSbfdZa2nxlfl57HrckYrBOGNTfiynewPHxZWVxOjnZ9++vX3AwirIsnSdwvYeT4vwEqRFpBornRGD/PnwOzI2ykO0Zy2Qy94ejhs7siWuNG3bdRtVpdDw+oqwsvv/85cJBa72BhLG9kK9x/6FoFpqhd0z82r+fmqLZC3g/5HxkoV5wqkB6H9jxp/Mj5WMYJcwmkmXJXME10i/UxdrPpN+REosw8pr/bbigZ14EvoQ7a49+mVLtL13WO+8jnWIvZXmt+7iLABy/ZuqQMRcg5lDQLf39AVlgz1tmWO2bFSLjhnZsEfY8JTUPAG/aAdm2JbDaBbS2VaQ1mAYdEqa+HNAd8DHCahcwHna5vyPt/VBweAID80Vgv/dvVVmc3t7x6eWIr1++4c/rTxjjB+3TniMT5MG4B3Df5nMCMNtznMlktgNH7ZzZK7l9SuNdMKCK4/EIwOLf//yANeIUXXUCb8r7yO8NbJr+R39+9hrewJa4z5uZDvPYJROSBeCpqCoUtcYy4VhgWr13jYh4qtpW8C4cbE809s4C13BivFe5cBZvoz2/V5rsVgAG+jfHLAQz2XZu9QoLc8IZ3Bv2xo7VPla7PL06ZSU7tgZrvrKLwKzpFsJoI6NNHnbbvPC1mpXxrJHvEwUGsUVP8QOkNqoAF9RlDuNCq0wUKm+B72IypN+58GHdgy2MFKiswipwOp1gYPDX108Q47yDzcQ4zamw0FNfjwnADM9tJpPZBuvXwJk9kduf5VgBpJ6vNSJ4fDjAFAb/+v4TFSwKKaDWOQXHhPsCixgY9Pvyo57Aic9S+LFMZtsw9L0zU5guUGYGxryJj0qtpcs7CayqCtOJ+uQq8WpAT1t6Dc/YfU0KcZw4ZnDBNNBplPZaHmc5rK2QBn6Bwk0F4BDm0Itr2jbWSQrnYZkfFJa9gVOwphurXR5G+4y6VUT+ieGzkB/GfAXCOpizLmFNNyDbthR228K67n4X7m7tMNQfCoVWpv5c2JdjhNW2URG4GYwCjQf4HeyJL8SSdlOEYL8YwYu81ireTyeUZYmvXz7j+fiEsnxHKiULSLsyXdDsnRlOzoYrwlOhGj1TQ0AzPcOZTGZdOGrazFbI7cd9aRY8qjaBOx8fH2CM4Mev3zhVFpB+5JBUXyH0BO7sGVx7CiM6B5BF4I8GS9874+H29qTC3/IFTVScbnHUrGtOXfaE3QEv4NSYc+i7sWvMtSmu91N2jJ2/KwJPtWPf5X0NO+8iAPtCyx4SGhgOI3gLO5troe9wM3Y8Jz4ctHY+Y4E57ZhtA7jsa7a/qItWDjE0H6b8TOHaoCv02G4Ae9qx2sdqF8BtmwVg69Wnbcy3G+HPHww2xjwUmaO7hP04Nk9lgDftfBFo2tUBc6T338uv2/8svQ8g0/M6NhCu1OJgCkAtVAUigDXA6U1RlRWMqfDf/9d/onw7dc/Rniy4EDqJFAvB/nV6slabANLNt1O8gjm78plM5sbw1LAfl3v3Cdbue2Tm4YVVMdI+sKo4HgocHh7wPz9/orIWqPse8L2AOps7i8S8ACwCmeENPEUEzgLwPmDqd39sLq+nP1xe3ng60aenlaaWdYupr1n3ab0SOLimqkKtDNbBc71QL/UEnnINoB63+szQfsS2tB6WGJcmvr5WJqf6X2cvHf12yfXuyV09gEO89whbp/NeAnBHDMc0ATj1Wza6tq2/UXjIdtKNC2bbgCwCL4U5X1vvQq72wcOcdqy2sdrlYbbPd/ZvsR1Es+J/ZCAxRtxnYurTNYMkQtsA3rRrJu3OCYV6JQF45klYntWlArW1QFmWqKoK3z6/4Pn5GeXpvT6pO4cRA6sWUhSoqgoigmIktWMBuGOfAhIK6hMEYO91bM8cn8lk9gNHzZq5pE/A0o/I3B9rLR4fHvDwcMDP37/xXlpUVlEYA2jlRN5I0PWT/0ak0RhCj+BwoVm8KFQCL+NBm0j6a5nlsPS5Pya30x8y92HM4e+S/LBV+3rI0fJeTPU6TnkqT+2zJK/RrIHqRq+7hNiu5roJO3sC8MRrhL9rFnTNsnI+Wi8mF6woAKc8IJg6rbeamJu66fjZ8xBX4K1tXAIwwJ1uAK99rHZ58irTZTDnqyq7fdm2JbDaxmqXx9VxV+g/dDz7wsmbZe01c7oNLehjgtU2F4fhvG2C8xOAZ6+18ARMaXcuLFZ8rBeBy6rE48MBXz9/gYFFZS2MtB74VVV1vHziAWk4QTu6b1/w1kwYzzCNxzKZzG3gqUEzQ+S6OHOOjneVKh4OBzw+HvDr9x+8vZ+cd5oxgB3uJwiAQkzQl+jOfw6GkU58FpIF4O3D1NfeN7et61MRWTMcdCJITeyZpQTglJ7WEzJn2BITniu8VhyRasq5LjnmXv2ijqjeu2YbaSv4wcQzC3xYjns9j6HIvJoA3BizAe+Ma53rFt47zJW4s+16KzKuCX+68cJqn4LXNnaY0i3uvNy7TZoDU7rFZNuWw9wvqXDBHrfqRN5bbmnBSrhnMSPM6Rfvd5RC/GDmjFfwOc/iOTCl2VRv4NCTRkTwWlao3t8hUPz15SseHx9xOr3hUHv+FvXf0PMmPO/gZO7YvsBRRqVScUp4x0wms114as/9k+vSzD3oTsgDn1+e8ePnb/x5fwcKA6j2luyl+gphtJHw6yXhoLMAvB+Y+tz7JAvAmelYaztisc4NKYvxxcvnwkLHx45F903NqU0dN89ZYL2kbE8RvMd+d2n/TuuFV/dkdQHYG8E42ZryUgbm2eh/c6swauyVuLOPzxMY4E07VrtCWG3MnsDLYcnTWADuevHxtA8AT5oNwWzfFmxji0wC1IvJJvlmxj8UiNy2Lb5V5JRrEA9W2GC2bYoA7JEgjODgr65cLNjS7tzq5673roG1FU6nElVl8fT0iG9fvqAq35qde+PzWbgQ0XEIqXN79tVn8y+AgX2W43vheYozmcy14Ko19wFTnyfzcVA4YTflJfXy/ITXt3f8+PUbpig638XH+r/NvsD12cNjxkTg1Hnz4vz9kPPxVtzfmzGzfVQ14S0sQDgXHs2LCwCr6CwEGloUMCTQpsJNp7YWiH83xFyP4VQUiridmeP1fOlzcelcYaNdLD7F1Lm91hHCsgjAQ5+t3ZkeK8CDG2/HqzFwXa+H5DVXrtTHCn/XNK7B0drpNka2bRnNatO6Ql27DtkKqbqOKe1CAUf1/vk6XsfxPg/XWMh0K5ht81hrqezxVOHzEAs5cW8fANTc5T4Y8zR8drM
QPB9t/mr7ZiQ7JTzgjtnOlHbxgHaw/VB1oRkhOJUlyvIEUeA//v4GA8BWFcQEq4NFXD6MhHI89747sdt6C0WGoTNFIJ1czWQy5PDUhtuCsb+XyYyj9b8gUkgtBBhYPD8fcaos/vn5y3lqqbo9f4FO49+IwNru9yvN+R3+89Sis54nsDqHNKa+WWY5OR+XMjZwkuDvbdOXfZ4vM59KLVQBlX50hyk0ZWKi53AoLLfMve7ZOGHDv0yIy4K+A9hcUXkJ54TwFO2cGBCmw9S5qWvpiQqsLwA3xkSJCPB1xIe8lcMVEbcI9TzVNmb01ir4BTCnXbZtGdkbeDmjE9Yr0m0guWwD+J8HVrJty/DPQ9jnEBFAzarPL3tfLvZUZoTVtqkrVdcW/BmYErYqfj6stThVFqe3d3z9/Anfvn7G6f29CQcd7/M09nyNefj0j512LzxPcSaTGYOjFlwHpv5GJrMGbqF0PUltSzw9PQFS4N///ANzOOB0OjV9Ck/82i1NCz/D4LHNa/SfvxwGej+w9K+3zfrtU87HfeC9gNea81EFoMvCik8Vaafc163L89D5p4a7HjvH0He3nD9TBg/g0Jj4NVsnPs4g24hMen4G5Q4wV+jMAjDAnnactrHaBQST/Mir3ObCmq+hBzAr7GnHCLNtHlYbVV1IWKsKgbl5mOepNsWv2epfdhGY1S7AeQLPsW6NaA0snNvjKBRyfXjFqnJjsff3Ex4OBb59+wIDhdr2WPUTu6rNRK9J9K+neQKH34/cy8BvMpkMHzy14HXJdVAmMw0fwcj/7/hYQMXgX9+/Q4yBRbpdbxd8CYpO/6Tbp58qAucF+fuBqX+9PXjarpyP+8ALwKy0HsZAv/xr95V2o1iETPWsHWLKXr5LuWa46Xs+lzQCcEzsDs3U6fcbXd9qb99LYK/UmYVg5rTLti0jDzyWwZynQNs+uMlvrvpkC2nHCKtdHib7uotaBKquP8LYZ4rFVha7gOlhd9aA1i4AWBDWLwvBLUNho1QtjCmg6vrKp/KEqjzh29cveHx4gK0qGJF6X2YJflsvAhGBiQKBDaX7sOfP+XvheYIzmQywPcGXqR+QyeyNeOG7KlAAeHw4QA4F/vXPd1hpPXxTIrA/hxGBNGPtNgpXG1Wk+zv3bfcLpv5YZjk5H6fA37blfNwHzgkAM5dl35ZrzEPNLZ/eEzm8buocc8TaOQxdbwn3eDZVdfLOwavAOkDwdoWJp+AYgLGmmYfZPOa0y7Yth9s6TtjzFOivQGaB0aYtkNNtKf458P+40pLJlhSs9rHWb565tt1bbGdKu8n3Xue5E4INjk+PMEWB77/+4P/7n3/j6XhEVVVujz60/yQ4dRxucW66e/H54nvJZDI3QRP/mPFtWfgvk8ncDmNMsFBaIQJYEfw5nXA6nfAff33DQxAxaKxNXxLCmb1OymQyma0jIpud5D4X9nn2P4P2n2j3ff3PrWSy9d+Rfwlbz4WBvkX/NmzDr03rAXyqUFnrVGHDoQvHN8wUytXb5jpGCoW4sGzrmtWBdZKmNWvY3X8tWNPMw2wfq21tcdMmJDTANUHMCmueeroNI1d+Mqddtm0Z69uWKuP1ikb4trWt55jquFRoaGDdejjVx/R/WdPuloT+HZ3rpw703y2w7Z5pu/4z2ye1CtlP0vajWghOVYXqVAKw+Pb1LzwdBFVV1qEWDSqtB7eqTSQnMQYmqA/G9vpLvXbvz98Hz1OSyeyLoZ7trWs0prYvk8nMJ9WHtQoIFALg4eGAh6LAz19/8FaVUGMAVRhVoBaP/WS28xZyE5xFIxqHkUsAY/r9iPgcmX3wMfNyzjzTNtrPj5mP+0OhqHael3efk0klZ3T5pJag0c8VHUHZuWm4KF5SR9bw0bz8Xx/N65bPp2qwB/D7qYS1SiUAe1gn5YCuAJFDQs/H2cdV3jzMaZdtW4YlrkuYYc5TZhEY4E87VrJtMa2H7xgKV8+x1nGs23tswbNxDfuaK57JJmYhmDVf5w3yBG9v7yjLEl++vODT8QhblU4wNga2qlAUBQDAqoWF2xdYgjCP4XXH3oefnRWBB36fyWQu55KaKz+XmUymR93neHh4wMPDAd9//sJ7WUEMYFTCSJoAuh5ORv1n4XfpLSTixWWs/bDMPHI+hmy7jc15uQ8qtTnqwgYx6trP8Dmsbjx310QFaTyAyfYADmEXgMO/jCIwwFvJt3bxicCsaeZhto/VNlWmnRK2BXOehn8ZK2D2tGOE2Tbg3vZNbx+99y9r6jELwOFf/5rFPs+9yt1U4bfzG1IRmLUu6e7RN26jL4tVVeHtvYIA+O//+g+oLWHLCuZQwFrbOXdV56LBeRE4/izvC5zJ3BdR9MSXpCMCWZuUyWS2Q7f/bfH50yd8//4bb+/v0IMJ9vv1x3Trm6JeWFZ/O9hXyPXUfmHtU9+fbZfxnI/7oFKl2gc40+J8e+MP60VV0chc1e3pfFN7UgKwrwdYK4ShcH0MnYxw8tCub04S1nwFAI1HvUQwpxvAbR+rbc3zurIdW4TOyOR2AAAT0klEQVQ5T0lNa2BOO1aYbQNuYd/1FkNVQb/Ew9RfYmQoVDUbt7BtiejbO8dCuz6qEHxOBE6NcyoLnE4nqLX48vKMl5cjyrJ0IRzrCE5+LNKEYQQABcyot++YSDy+GCJ7Amcy8xh7XvhqqkwmszdULQ5FgafHB/x+fcevt1cIJBkJMqyvjIgLCx18F4Z8jmuw3DfYF4x96fuxr7L8sfNyH1joor3aM3dEAann98ZqkDJayH0LrLXbFoCZiCcNsyfwfLIIvBxm+5htA5AbzQUw5ym7EMyedqx8HNuiiRf1n2lnL5EpWHB63DLnJfBxReCk91k9lze31GQheBrdSdNh25pVuyIwRmBV8H46oSwrPD4c8Pe3z9CqcnnlJ2PrffvCyYGDpBeYDIm/7efnF9xmETjz0bhmeeeplTKZzF7x/Qj310DVoiiA49MRb69v+Pn23uuXeJr3ChSJ79vj0l7EjFF1Mstg6kffjv2X1Y+Rj/vGAlDwb2P1IamzxEzcxq1SdQLtSPt6kTn1XMJZVxOmfdHixGCyDWjtExHXMbLazNcqOAZ3zB0vEZ9K46nFlOcsMOcrO84zRkGtGmYm4+rgta3YB8x1LZNt8+tfGfmXwnXvZ9sFt1K/EKlX7LuVf2un3ZbaK2ZbL7Et7m0NlogbFpUxj9ePxpTxjB9fuOMsnh4ecHx8RHkq8X//73/BiuBQGKit3HH1hIDAoKjXHpdqMbZbVCrSUrioasxERb3/ODjGO5nMNQjH9vG/Sb9PPAyT699MJpO5It3FXX5rCeD19R1PxyO+PD/VjiwFrAJF0wdo+ygKRQVFpa23kvvORx0a7it81D7eHtleXqYCrC4bX2cyLAgAM99HIHNl1AJqBVCB1P8MutEyzmGQjtR1LZrxS8oD2BPvicY0ERZ2RJjsCrHWTbN4rwoFz1oi9kZbG9cTLtjTDeC1kdUuoK5P1jZig7Dnafs31yVzyLYtZ5p96+x5r6pNWGimftMW8pTZxjm2XSPM8xwuTb
dbllO2PA3HNF7oPe8xY2CtxXtVojqd8PJ8xF9fvqA8vcNN7tYhoeEiEwm6kwTjYZ+H3mdv4Mw2YSmTXDVPJpP5qDR9jPrv8ekJp6rEz19/XJ9BASlaD944PLTRUFRG0+fwxPsCs8/dZqbB1n++Dh+vTO4zHz8efrzIug3pR0BtuzD0klm+atLY/zJGBWDPUDi8tRvv1Ep1YH27QpoJ12AvLhb4K33J4s1CWO1jtcuTw0EvhzVvu+INV13CmmYeVvtY7fKM27eO+OvZSljoe/bn4msPheNlLndjtnW+WSGrr5FuH0EIHivzYdhGf0xHMDYGr68lTlWFg1j8199/QaCoqhKAQAoDawWFtJ480/b+nfJ+/L5uuZo583FhaLemwFG7ZDKZzARU8eXliNe3N/z4844KBQRlr7/Q2RPYfdJ0L7vhn4fr6q3U4Zk+LP3my8jlbx/5mPH5WOUe531IJrMJ/rucLACfIUycMDxJvDptTUIvNIVCyTo7W6j4GfcGZk+3bN8ysifwcljzFAgFL766BOBPO1ZYbUvbNW3/j3ugqk2oVmD9vlzMvSO8zPKgJS1zQEKwDt9I//N75jqzJzDAm6+hJ3Dqc/8aAKwCMAZ/fv8GbIVvX7/h06cjTm9vMIFXTnguC0VxZl/gVNrHE7yj95D4TSYTsteywVmrZDKZzLBjimiF56cjSlX88+NXE8nQGNOLVOIxAwKw+914/b7X+n/PsPaZp5HLW8i28zLjUdUsAN8JUQlavOti76BFTBKAPbEQzNRgh0Irk11A1zZW13zuyt/Q2sdql4fZPlbbFLy2scOebtZy28eafqx2Ady2AaF9PAIw4DqYNhKQWFgjT/cmAvtdNGKxt9ldQ9cpjVkInu7dPmcRhFU4714ACsHbqcLpVOL4WOCvb58hlYUGoRoVTvyFqgvxOLCn6Xnxt91HcOAmOr9hq2sy1yPnbRfeViKTyWRawggjrWONxfHpAQaCf/36A2ttou1vX0vdp3Qisa0/N8Ex2RN4bzCPhYbJZS1mm/mYickC8P24qQCM2z+TswTgkHt7aMzB2naTdzbbfLpVE/bQWgNmIR3g9AYGuBtPZtsAbvuyR/AymOsRVYW1fHZ52J8HVrZim9YKHFP5q6z9cN7A1ygv7GWOtLsEgFsIZstX346mvIHH0qEsLd7eS5gC+M+/vsFoBVWBFYHAwijgVgK4fYRtPUFr6o/D84+FgR73BraIA2KJ3GrYnLkWbO0AI0MxZbhqj0wmk5mP73c8PDxAtML336+oXHehWTQW9k08ItLsCxzPAYTisD82JLc724Ojv9zvZzpyeZoKRz5mLiWLwFdEnQOiYJ2tjKqqcjXbDSIb88RKviJb6EAMrbZngdc2BePwmje9MpeSc3Y5YZhKJs6Fo1oT5rqE2batMLYKfi0KYzZRz6VC4S49zzVgy8ctcWnaMbYrt8QtXPJeNcNpF6ZLURi8fDpCUOB//7//B7/e3iEHA1QnFCKoLAA5NOcUbZ+xUHCOzxu/H3rt6A8zP1reMeLzd+hf5jw5lTKZzF7x7f/7+ztMccBfXz7jUFd6ilbcjfvlqT5D2J8I25exfkUmM51dyhmZTObeaL3AyUe02OmY6CIP4NR7lkRi70R09uDjSLIOzOnXNY0v8VjTjtUuD7N92RN4Ot0QUrffyH4pzra1rRiG/XlghTk6STfduEJCq2on7AxT2sUTSnNsGyur16ybGJ8Jbf7y2ea5Rrrdoqyy5efy51Jg4VYSv76+4vh4xN9fP0FtCSMGlWqzsnnoOTvnpTPdGzi2jKue2RI53VYg4fLLVUtkMpnMZST7GhY4Ph4gBfDPr1ecKgtjBFqHhS6k3qqtXlztRjfdSB/9/kS7QUl4qdy2bQu2vnKXXJamwp2PmalkD+DliLbtF6JXa+C3artFm7hYAA5hneSPV6Qx2edts8RTc5yNQdthdPZxrvriTDteu0JYbcwi8DJY89PDvC8we9qx2sdql6e1j6/9Kq1NhnVbm7ki8BplgLncNe0XT5Y2XDPdrl1m187TS8Yu1loYYyBSwMKiqixO7xXK8oT//l9/Q1QBreDroXDhlt8f3EXLELdXcCT0hrYNicDu/bCNWQTO9781eGv5TCaTuZww+offssFai6enBxhT4J8fP1FWFuKjaQXbOoTtWYH0IrL4M1WbDCWd4WftPjLloGaDrJ+PmWtRBmH2M9O45Z6+y1AoFFb9WLuAC3V/Ha42+5gb6ulM3b9rbTht4wwBvRXYJvU3Rd63bhHs5Y3ZPmbbmGFPN2b7CsI2YisDU7Z02wrM6ba2bfH15zwLUu8bZPUdoopCgMcnA/NwwP/zf77j549XPD4ce+dVF/cKKkClCgvtqbhTQzee88DfO+dCLa9dvjLTyaPPTCazdaa0R/EiUAWgRYG39wrV6YS/v3zG4+EAtRYQgVXbRpwJFmv6bStS27eMbx2R2Qq5D7MPcj7uh5yX++FWbeNVPIA9l4TpuzWpjgabfcwu++ydM2V1bQFv2rHa5WG2r/GQEQHE7RWQOQ97nrb2cdUl7OnGDLN93fJ22zLn6yjtXMZNzljtChHeW5SxrwRcby/gW8Jqn/0A/cxblVfWPJ1MUN2cygqv7yc8FAb/8fULRGovHHTrgspaWKhbGIJxwXLIg6edTI6MCH8LjnqGwYYMFxt/6jOZzA4xgTjraVovjfv61+VQGBwOBX78+oO3UwlTCGC1WXQGRFFBAJim/5CqUb2ncDe6SG6Pt8H9+8a5XNyCzY9xMgCc72iV87JHM/pUwBBG4ItpdFVcfxxyVQEY4K48mAVqAKjAPanJbRsQdwiYwn5zpx2nbax2eXJI6POw5yHQ3daNfV9ggDdNWe0CtmLbOp1Rv/evJxRtwr8sbSkw3ftwbRjtCrtKjPYBOST0rehKr4KyqvD69gZV4D///gvHxwLl+6mehDWdPnSptpnEdb+WZgOb1ETt2vsCM9VXme2y3ac9k8mwcqsFT/eqr1QVh6LA09MB37//wltVNY27GRBwRd135+7bfz3Wp8hwcZ9+cc7/W7Pl8U2mi9bRm3KOBtSJYe7gdHEtGoeMK5/36gIwwF2BMHsCe51ftc5wDrM6sOZt16yu1wELvGnHaRfAbRvQXZ3z0WHPqyFiAdh/qJ1veGBNZ+a2FeBNN2DIttsIwvEOIkNbUjCnlycWqdlgtCu0SFUZqzgAvN7A7PXcGI34X+97b4xBWVmUVvD65xdenh7xv/7rb7y/vQHqvm9COBo3qWvU7wnYF4BTfe7UHsHu/bCdZmZ6biX9M5zw1dKZTGarJFuj1B64N7j23QRguHba2ArH52f8++dvvFUnGGOcJ/DIXr4H6Y9tVBXGmKBPlQXgrXG78U6c70Ne5JlrwTh2zczjowvAbhtk6TS9ppnX3VZbYm/gcHYTARjg9h4BuO3zNtksAi9CGRMtgDX9WO0C+G3jte4ymNP9mqhqrzvg8pWzLmHOlzA8L1O76mFNu75d1+2kNiGdF+QJs+i1hX3E2O1i7DLdbN+bK5Rb1vw8h2vRuh7+YgxUgdPph
NOphIjB3399wYMRqLX90PDNogHp1E6h0BvWEXMEYCcs98+ZydySbT7NmUzmVqwp4s5hrbpLAai1KIzAiODp+ITvP3/jdCqb/uSQCFyg2ycYGlOkPIFzn4Cb2/SNc57fm62OcTI1tRPLRxaAxe5nCwGr9baTuF4bePU9gMc8SJgnDuP397JxLM0U9X5tnI5oxA2E71SubMYAvOmWbVtKI64Qw27fXZmYFuoPJa2DbTRBzwRreWO1y9Pa15FClp2rPWnj+dstLxIfecam/nuG8sfuCexhFNJd9cbdz7x2NJdrnStV3tgiz3g6z6w60V/hVkSLCBQGlQK/fr9Cq3f859/f8Pz0hKosG68eGGnGJQrA2PQ+fWN797XfRWkXpFvsTcyYnpntwt1KZDIfg6Hn8P9v71ybW8VhMPzKJGnSs9Pds7f///t2dmZnektJg7UfsAkQk+ZGeQE9p5m0ORRUGWTZsuQ+rH3Vh3T4iNf2MZI4Xd8L6tqXTI0Y+kahcOKwD1VBFqJYPz7g+eUDeZ4DWQYPj0xc0LmUXyKQsIZMnFTCV3lZjXbo9i8MTm4fg6XalnRwMmHYx9LGF4Tm89CQPco5Lr0bidtVwr8pkAoA3zrX0FsGcJs4iRPLfDCRmjwc8kGJjVqXyxPfw8wdhVY177hg1hnALR+zbJ5YNma9fTsX6kJ9rXwmGaztyioXwC0bkJLvcp+p3Eaihz3GahneANdkTFs2RhirzzBnAgP3f17vqXO28UMXXfdbfbwR37e7Pbz3WIji7z//gN9/QiCVf5NlGYrCNxKiUoPS00Hgw89ODueOH0uYMG6f3zBuhbuHMIx5cEsAOHXMXPoIVvuVCbB6WOL1fYdtnsNlZYA3Vvbw2gzoClAGiAEUqsg62s+5830LY1iu89WtLdlgH0cb5+O9RzHxALDodIK9KaoYKlCVxhhVADi+s92EY5DNA9SJ/MydBWsQGGDXm8l2DcwloZn11iepv/sii6ClHWE1Jcztyiobq1yRpnxfB4Bjhu93BKKYdceeDcwqFxCyQpnlIw0Es5e9P0U98BsX6IoI8t0eu88CIorff33CepHBFwUkZAGXW620dxTv3revPb46TOSGn6M8td9t7wU8Nt0anPBaOMOYN8dZoPNmVLZKAeeAH5s1nl/f8bbbAVKWiG5X+IgIwgIwlD5ChnT1ENsTeBxc7qNbGzLCPA40LsOrT4zUpsXUA8BASAYN8YV79H3fFgCukyonyNCRd00cMslWkLuDrJ0GcxAYYNYbp1wRVvkqWzKwHClYddYLrWD8zRZAU1PePLC2rcl1PecGgoe4L9mzgb0/aIVJtgjr/VeVhSakL53NeW9goDUOUw8VB3VLvL29AcUnfj494XFTloQuj3MQOX/MVK9q1KwCVZsYrj45zvaR2nkM41zG+0QaxrhoB/aM2xmT/fIot5Rwqlhv1njd5vjIc6g79O/iy4WG7dLOIgLvPRwAJwf/wDnXWFyXqjpi8HDaB7b2GhNjHs8YBxSKYgptqUCcA+OqI/x97H1zpu+WPnCuOhwdsZGddaCThNWRZZWLnTi4YdTeHNpUVasyz1J73Yxg8qvM+oD1nju1VyULKfkU5WRL/cUA24DRysVND+as9lR5ZSbqW/GcRBzUe6DY4ZfNA5arFf59ecU//z3DZctwUPfiiq69kVNyKMqFrRqWHKQkS/2uYbTRxMswjOuQM18uZHnedaw1Q8ZuvxxCpSyXYfuxw4/NCuvlEvA+9NmCegQ31Y97NLfSah6j5guQc5l/btbCMIwTKMo94xWQEAGbs8W45/zH4BnAjGXT2vIxyQaUm3qXjg8onwRWh+xYLC7l8eqNU646Q8t4ZCdqz+aYSkIz2ruzuXe2b+d1+Afn5zwPQ7Q14/6nkcH9knj5jkvGCZRoTxTDBzfb95n3vpXdNxzt0rastm3oviuFVu9hIc3M9Na1T+65sG4r09RZPfumeUx0XyT8p8JhVxTIP3I4Af76+RsWmZQTu2GvXgGgodSYa5xbkva0+l6CuyShjFdKXarJxTpMujWGh8+SGsbXpO7bvi1bskpDz9c0jpmSzfLqkWULfO73WDgHB8XjeoWX7Q7veV528mGbiYZv1Fosr6pwEIiTMPGP4CCgDAhYOWhqun3zVBuFRjXoYBybGoFU/Kf1WX3u2Y/tGauJ6xLfzRVfWzgN3Nbv/Q/tGiTOTIxAEAAAAABJRU5ErkJggg==) # ## Data Overview # The dataset contains the prices and other attributes of almost 54,000 diamonds. You can download the dataset from [here](https://www.kaggle.com/datasets/shivam2503/diamonds/code). # **Content** # **price:** The price of the Diamond # **carat:** The carat value of the Diamond # **cut:** The cut type of the Diamond, it determines the shine # **color:** The color value of the Diamond # **clarity:** The carat type of the Diamond # **depth:** The depth value of the Diamond # **table:** Flat facet on its surface — the large, flat surface facet that you can see when you look at the diamond from above. # **x:** Width of the diamond # **y:** Length of the diamond # **z:** Height of the diamond # ![image1](https://assets.diamondnexus.com/q_auto,f_auto/diamond_nexus/blog/2019/may/29e/the-4-cs.jpg) # **Diamond Clarity** : Refers to the grade we give a diamond based on the blemishes and inclusions found in a diamond. # **Diamond Color** : Diamond color refers to how clear a diamond is. The diamond color scale ranges from D (completely clear) to Z (a warm yellowish ting). 
# **Diamond Cut**: Diamond cut refers to the proportions and technical specs that determine how brilliant a diamond is (Fair, Good, Very Good, Premium, Ideal).
# **Diamond Depth**: Depth % refers to the height of the diamond, from the culet to the top of the table.
# **Diamond Table**: Diamond table % is determined by dividing the width of the table (the top surface) by the width (diameter) of the diamond.

# # Get the Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns

from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, Ridge
from xgboost import XGBRegressor
from sklearn.neighbors import KNeighborsRegressor
import lightgbm as lgb
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn import metrics

diamond = pd.read_csv(
    "/content/drive/MyDrive/Colab Notebooks/SHAI_Level_1/Diamond Project/diamond-price-prediction/train.csv"
)
diamond_test = pd.read_csv(
    "/content/drive/MyDrive/Colab Notebooks/SHAI_Level_1/Diamond Project/diamond-price-prediction/test.csv"
)

# # Data Description
diamond.shape
diamond.head()
diamond.info()

# **Basic Data Observations**
# - price: Continuous; this is the target variable!
# - carat: Continuous
# - cut: Categorical
# - color: Categorical
# - clarity: Categorical
# - depth: Continuous
# - table: Continuous
# - x: Continuous
# - y: Continuous
# - z: Continuous

diamond.describe()

# **Basic Data Statistical Observations**
# - The minimum values of "x", "y" and "z" are zero. Since a diamond cannot have a dimension equal to zero, these must be incorrect values in the data.
# - "y" and "z" have some outliers that need to be removed or replaced with another value.
# - The Id column is just a row identifier and carries no predictive information, so we will drop it.

# # Data Exploration
diamond["price"].hist()

# The distribution of the target looks fine for modelling: there are enough rows across the whole price range.

# ## Categorical variables: Bar plot
plt.figure(figsize=(18, 3))
plt.subplot(1, 3, 1)
diamond.groupby("cut").size().plot.barh(legend=None, color="#e8a811")
plt.subplot(1, 3, 2)
diamond.groupby("color").size().plot.barh(legend=None, color="#1192e8")
plt.subplot(1, 3, 3)
diamond.groupby("clarity").size().plot.barh(legend=None, color="#fa4d56")

# A categorical column that is too skewed (one category dominating almost all rows) carries no useful information about the target variable. None of the three categorical columns here is that skewed, so all of them will be kept for our model.

# ## Continuous variables: Histogram
diamond.hist(["carat", "depth", "table", "x", "y", "z"], figsize=(18, 10))

# The ideal histogram is a bell curve or a slightly skewed bell curve. If there is too much skewness, outlier treatment should be done; a quick numeric check of the skewness follows below.
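# As a numeric complement to the histograms, the skewness of each continuous
# column can be computed directly. A minimal sketch using pandas' built-in
# skew(); the |skew| > 1 rule of thumb below is an assumption, not part of the
# original analysis.
numeric_cols = ["carat", "depth", "table", "x", "y", "z"]
skew_values = diamond[numeric_cols].skew().sort_values(ascending=False)
print(skew_values)
# Columns with strong skew are the first candidates for outlier treatment
print("Heavily skewed:", list(skew_values[skew_values.abs() > 1].index))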
# Two options to handle outliers:
# - Delete the outlier records
# - Cap (impute) the outlier values with a sensible replacement value

# # Data Cleaning
diamond.drop(["Id"], axis=1, inplace=True)

# A diamond cannot have a zero dimension, so drop those rows
diamond = diamond.drop(diamond[diamond["x"] == 0].index)
diamond = diamond.drop(diamond[diamond["y"] == 0].index)
diamond = diamond.drop(diamond[diamond["z"] == 0].index)

# ## Replacing outliers

# Inspect the low end of "x", then cap the undersized values at 3.73 mm
diamond.loc[diamond["x"] > 2, "x"].sort_values(ascending=True)
diamond.loc[diamond["x"] < 3.73, "x"] = 3.73

# Inspect the high end of "y", then cap the oversized values at 10.54 mm
diamond.loc[diamond["y"] < 20, "y"].sort_values(ascending=False)
diamond.loc[diamond["y"] > 20, "y"] = 10.54

# Inspect the high end of "z", then cap the oversized values at 6.98 mm
diamond.loc[diamond["z"] < 20, "z"].sort_values(ascending=False)
diamond.loc[diamond["z"] > 8, "z"] = 6.98

diamond.hist(["x", "y", "z"], figsize=(18, 8))

# Inspect the low end of "z", then cap the undersized values at 2.06 mm
diamond.loc[diamond["z"] > 2, "z"].sort_values(ascending=True)
diamond.loc[diamond["z"] < 2, "z"] = 2.06

diamond.hist(["x", "y", "z"], figsize=(18, 8))
diamond.describe()

# ## Missing values
diamond.isnull().sum()

# There are no missing values in our data.

# # Data Exploration | Part 2

# ## Continuous Vs Continuous: Scatter Charts
continuous_cols = list(diamond.select_dtypes(include=["int64", "float64"]).columns)
for predictor in continuous_cols:
    diamond.plot.scatter(
        x=predictor, y="price", figsize=(10, 5), title=predictor + " VS " + "price"
    )

# There could be three scenarios:
# - Increasing trend
# - Decreasing trend
# - No trend

# ## Continuous Vs Continuous: Correlation Value
correlation = diamond[continuous_cols].corr()
correlation

fig = plt.figure(figsize=(10, 5))
sns.heatmap(correlation, annot=True, cmap="viridis")

correlation["price"][abs(correlation["price"]) > 0.2]

# **Observations**
# - carat has a strong positive correlation with price (0.92).
# - depth has a negligible negative correlation with price (-0.01), indicating that the depth of a diamond does not have a significant impact on its price.
# - table has only a weak positive correlation with price (0.13).
# - The x, y and z dimensions have strong positive correlations with price (>0.88).

# ## Categorical Vs Continuous: Box Plots
categorical_cols = list(diamond.select_dtypes(include=["object"]).columns)
fig, ax = plt.subplots(nrows=1, ncols=len(categorical_cols), figsize=(18, 5))
for i, col in enumerate(categorical_cols):
    diamond.boxplot(column="price", by=col, figsize=(5, 5), vert=True, ax=ax[i])

# In our data, all three categorical variables look correlated with the target variable (price); a quick numeric check follows below.
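# The box-plot impression can be backed up numerically by comparing the median
# price per category level. A minimal sketch (column names as used above); a
# clear spread of medians across levels suggests the column is informative.
for col in ["cut", "color", "clarity"]:
    print(diamond.groupby(col)["price"].median().sort_values(), "\n")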
# Selected categorical columns for our model: 'cut', 'color', 'clarity'
diamond.head()

# # Feature Selection & Transformation

# Combine the three dimensions into a single volume feature
diamond["volume"] = diamond.x * diamond.y * diamond.z

selected_cols = ["carat", "volume", "cut", "color", "clarity", "price"]
diamond = diamond[selected_cols]
diamond.head()

# Encode the ordered categories as integers (worst quality -> 0)
diamond["cut"] = diamond["cut"].map(
    {"Fair": 0, "Good": 1, "Very Good": 2, "Premium": 3, "Ideal": 4}
)
diamond["color"] = diamond["color"].map(
    {"J": 0, "I": 1, "H": 2, "G": 3, "F": 4, "E": 5, "D": 6}
)
diamond["clarity"] = diamond["clarity"].map(
    {"I1": 0, "SI2": 1, "SI1": 2, "VS2": 3, "VS1": 4, "VVS2": 5, "VVS1": 6, "IF": 7}
)
diamond.head()

target = "price"
features = ["carat", "volume", "cut", "color", "clarity"]
X_train_prepared = diamond[features]
y_train = diamond[target]

# # Select and Train a Model

# ## Train Model
lin_reg_pipe = LinearRegression()
dec_tree_pipe = DecisionTreeRegressor()
ranfor_pipe = RandomForestRegressor()
xgb_pipe = XGBRegressor()
lgbm_pipe = lgb.LGBMRegressor()
pipelines = [lin_reg_pipe, dec_tree_pipe, ranfor_pipe, xgb_pipe, lgbm_pipe]
for pipe in pipelines:
    pipe.fit(X_train_prepared, y_train)

# ## Evaluation Using Cross-Validation
cv_results_rms = []
for model in pipelines:
    cv_score = cross_val_score(
        model, X_train_prepared, y_train, scoring="neg_root_mean_squared_error", cv=10
    )
    cv_results_rms.append(cv_score)

# ## Measure Model’s RMSE
# cross_val_score returns the *negative* RMSE, so negate it to report RMSE
print("LinearRegression", -cv_results_rms[0].mean())
print("DecisionTree", -cv_results_rms[1].mean())
print("RandomForest", -cv_results_rms[2].mean())
print("XGBRegressor", -cv_results_rms[3].mean())
print("LGBMRegressor", -cv_results_rms[4].mean())

# ## Grid Search
params = {
    "n_estimators": [100, 200],
    "max_depth": [5, 10],
    "learning_rate": [0.01, 0.1],
}
grid_model = GridSearchCV(
    lgb.LGBMRegressor(random_state=101),
    params,
    scoring="neg_root_mean_squared_error",
    cv=5,
)
grid_model.fit(X_train_prepared, y_train)

# ## Analyze the Best Models
cvres = grid_model.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    # mean_test_score is already the negative RMSE, so just flip the sign
    print(-mean_score, params)

final_model = grid_model.best_estimator_

# # Evaluate Your System on the Test Set
diamond_test.head()

# Apply the same feature engineering and encoding to the test set
diamond_test["volume"] = diamond_test.x * diamond_test.y * diamond_test.z
Id = diamond_test["Id"]
diamond_test.drop(["Id", "depth", "table", "x", "y", "z"], axis=1, inplace=True)
diamond_test["cut"] = diamond_test["cut"].map(
    {"Fair": 0, "Good": 1, "Very Good": 2, "Premium": 3, "Ideal": 4}
)
diamond_test["color"] = diamond_test["color"].map(
    {"J": 0, "I": 1, "H": 2, "G": 3, "F": 4, "E": 5, "D": 6}
)
diamond_test["clarity"] = diamond_test["clarity"].map(
    {"I1": 0, "SI2": 1, "SI1": 2, "VS2": 3, "VS1": 4, "VVS2": 5, "VVS1": 6, "IF": 7}
)
diamond_test.head()

# Reorder the columns to match the training feature order before predicting
pred = final_model.predict(diamond_test[features])
data = {"Id": Id, "price": pred}
submission = pd.DataFrame(data=data)
submission.head()
submission.describe()
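# To produce the competition file, the predictions can be written to CSV.
# A minimal sketch; the "submission.csv" filename and the no-index format are
# assumptions about what the competition expects.
submission.to_csv("submission.csv", index=False)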
TAZvsd6eMFYdx0KYGPudYKEVabGPfLNYwBFAHhxMdiFkJvk+ja3BWE8VFvIy6vr91iXyG8rxRuylDmKbZxmjJYDp4+xiFr4EPXug6IKqWglIIxBlGS4/Z+jSTJtve8dTPHGzdTgE1h+cfHyYdI9o6EYRhuzSpzhuqpGgCGanF+n5vuAFa6Q9SDQZXJHX8L40UUwGfk2jYj5fSyIAiCcAq9uaYBgaG2L5mWnpehT23W51M7566A7/nwtS6ujGDuIXMgYShcqltt5VAtgb27Nnub7qOV0kiyHHfLJdIsBwD42sN8NoFfWf5c0XpMsJ96ez+vm1qhP3bWn8xUvsRttO3st8NCroACkQaRbLWeQpfXn05PQHg4/m9DxVT3kUJmDJZRhOV6AyYFIiAMArznjReYTQIwZ9uAQG3Wd4Xid6BCwxKlMg/n6tUWK92h063obzznQevtEAzybIlgyrNY1zzU9un+GRh2Z82KPrHEprwCogAWzoRrC1PbGrrQjcjSDUSOwj5SH2znGMtAosJvE2EX1ycIAmitQHTZeUdzw14YB64pS1wpS7McxphHN36b7ys3kIZzAIScgdf3S2ziFDkbaKWxmE4w8bVoXBq4Uo+uGVEG2wyVimAuYpmKLK1mzwW4keHmVB5TBLeFd+kKE8HlXMIAiNIc776+wyYurIEVAS/nId5+eQPP03sWeM22Z5uLYkDGdVvpt04cE9l2HAzWFrYuiou/uQdFcFsf8VyGPrAwRH5t6ROvzRDzEKIAFs6GLGQEQRCEvuHy3Hb9JbhLfQLPzCClHrhNmng+fO2BSnfQbTOPc89HZA40LlyShSt169hyHLYQYmittyf9ldJYrddYrSIYZoAIs0mA+XSytwhmNiAGLnxu5KKIVak7iAxtZGc9uu86GnsvOeA4Fqjl1YAZQCE4aZP9UR/zH4shrJXexq0FEUhprKIYr5drZKaQhacV3rhZYFHOC5i5mEOIvAAM60J2KGy1/u0Hxk7xa0N+z0jZJT+H6on2bVHbJ0P1W7vDN7BiwcQo4ztfOzWDBO/CWRGuEBsnEW0McfJHEARBOBX7xxMXee4Yecw8YbsBpNTebxERPF1s+iR5hrbzjkPPQ1yY57hOmxWJMA664vw13TM22331tvAOUMpXaaR5htu7JeazKSa+B19pePMQy1WMnA2oxSLAlfXKIR7ro+suNV1/Fq5ySMYi03FxjDx24pTx67Ic8dw7bmmzLhUe0jXOV3TN4eqxgJkZVPf9SgAYyHKDu+UK02CCcOJDacJiPkUQ+FhuoiJ8RGMdY8s42HcebbL0GxJ7lMo1JZ0leyTneLbP/YX6YXMa4LkO2R567xNAoAEcavefz+Gwqf+qS0osgC+ATZVlKERxKowJaZOCMB5OaY8ykoybIRd0h1zBMjO0UvA8D1op0IhcYMn8RxgKV+rWY+Xoiv/XfL//mYIB4X61xjpKABSf3SymCAO/+N0T8nBNHIo9K7iNyHis7CulBLuRPvUwXQrIU+IFExEU0b71Hils0hS3yzXStHC97nsab71Y4GY+BdhAUU2RPIBi1QbsyKWd2KNUto+hnoHquUXYIqtqfFJEg4TZGqwtSA+2h1gACxdDLC8EQegTscp3h6YsWc6rCSdApes2AhD4AeI0RZ6b8rPLW7PJ/GdcdFmX2Mil63ZftLWRLsvfxyCibUxhBmEVxUjzDIvpFEoBYVjEDV9vYgAMA4kVdSwutZ1r5DHrYJHpWNmfE3eJcYhN2uviPGuPx8Y2aYcFx67zm/fV31f9mjGm+hA5A3erNXxPYxZO4GmFaeAjeOMFNlGMTZIU95ZhJIRxI9bKNuXVnmeruHIg8Px11lD7AEO7ft7FyrFDZkJBXQkuO6rCxXDhJLktA5YgXAvSJgXheti6YmrEAmMULl2rE/8T3y/iggq9Y+v8zWW2G5sOccjS95j3Sqm9/mKTZLhbrZHnOcAGvqexmIXwPQ8K3bHDhW5cWNcJO0SO9iNt0g4eW7tWsjPGXL0Mu8b6x6yBiQgaxcsYhlIKSlVb4YViN80NbpcrRGlWehICFrMQL+cz+Fr3qvy1xfKT0L/FIyD7NcCQdWCQZK1iiGdbrSP64Bz1f6iDEGN3f80Y3jOKVf1Xc2w0xlz3LOKCXPsE7hA2NSqRoxuIHN1BZGkrBNQiiogc3eBccmzG6WqerjXGIE1T5IYL9Q7VYoReeM5x6d9/Ki7HsXOlLC6U45jN8eb7evtv9g1792U5bm7mCMMAKGMBR1GCVZyAlF9e21lmufA8Ky41xrr0DK8ZkaP9FDJkyHEXwGa7mGtui22Wvm1zgkPfq/9tKjenSuHFbArPU+VawWATpVhtYjCp0qtI4Q7VnLCWsEXxW6Esyq9Nz3aYvJrR9wXNtflYn22bjatH/Y0R9XjCfaQFPAyP0yeGGYoIaiCvImOtB3X6lNlZMYzEGHzy81/EN29X4gJaGCeubbIIgiAIjyF9vtAfbdYAnucBmQGzGdV2p8x5hKFwoW6d6iqtre13KTt14ON+tUKSp1hMZyAAk3AC5XnYbGIYCS3RO4+5HRbsQNx/208hw2Z7HCa+3/hQANzwluHyYbxTaM4Vug6CNa+1HfIyxuB2tULgeZiFE2ilMA0n8H0f602MODWlpRlApAA8PHgqCMI+trSNZi6Hyncfa7RmqJy268/FDqkNiy119zHsPermAK5UoqGwxVWRyNENRI7uILIcP/zgpcQGwVHO1R7blD4VXDvR73kKWhEU7VzEjWGuMYY8nEKXRYUtc7fHcKUcgDtl6SpHl/vHtnuaLuNNbqC0hyRJcXt3jzTLYRgIPI2beQjf06hGpr24gZYz1vogLmvtpE1uIj9boMarUGoV8uuOL2w31RYoNd67wTW1xUMWWY9ZxLXOE6o3isAgxEmKu+UacZqBAXha4cViihfzGTxNW3ejhSKYBrOYvQRDlMU2CzrJ63DYYPUJLkYH1WP32beStt7H992+dunWffT1i0311qa8duHWbEdwElcnrIIgCNdFEUlo/1Vda7nbgUmWcF6O2QTSWsPXGqrmKm4sdU3mO4LwdB5TArfdT6o6AKKQGcbdco1NFAMAFAHTSYBpEECjiL8oscQF4TRkXLORam5ehmZhckgRTI2/r2c71OW22KX4eCxecJcSmErzXlIKuTG4X21wv1wjz4tDYJOJjzde3GAWBEUN4uK6OfCMrXJR3HuKwzKWddyl2NZZoReIi1cxCvarUAWGr699upYu0uNmKFnBUsQFtGAFY3czNRYLIuF5iBwF4dyMrz8X7KZtY6dS8tYtgX0iMHLkeb69fwzK4DHk4TEeGyfHPmc7BZdc+tlQtw5Rb8ePHfbocv24+97DOMFKKRhmbKIEWZLiZjGFUkA48eFpjSiOkeU5oOxWGFTltqU+dFl9C/YgMnSBh0pg+1xF76ycrxXX2+Jj7p7brjU9BlVK4Mq6F0RgAqLcIF2uMQ08TMMJFBEW8wnCiYdVFCNKUijL5wcVLtWJp2KFhWqVblFNrfGkZsOzHboF9JXftnVP3/SpBN9L16ZDMY70i
W6MUMJV4borG0EQBJco3DsrcOk8Rnru6+Pck+YuS8D6/1opBFrB69gEEp6Oi8/RlTmnzeVobtIeG0u23UW02rMGqjZtVfk+Y+DV/QZpVhwQ8TyF+WwK39NQXBj7lPvCIEvdQtvcTq/Btanr1GUnsrSTwjK4+bqEHE3Lq+lxqLJoFpq45jK6y9K361rzu/W/lVJQRFCVgo0I6zjF6+UGcVaM/Z6n8XI+xctFWNSwhvt0Rg5DgMF+n9dXWftmqFZik7KnT4ZtSwwiBiuGGfdj2DJ2eQEo2iu19xt90Ve9GMr9MwCADNRAjo+GUtLaUL8uiSiAL4xU0OfhwiRVEARBaEfGSKEvmidkgdIS2PdG5/1hzPOaU/Pl0jzNhTIA7pTjMY6JC/wYr+9XWK2j8h1jPgsxnQbQqjjOxAywOl4xLQyDS8qLa6MpK5Gd7VRuo3d/D0+bsld4Li4ohvu0slOVHRwRcmNwt1zhbrUu3D4TIfB8vPVygTAMADZgkxeKGejyu+SMhbBwPLKXscPGQwB993nNQ+ljRZSpO1x6DuICWhB6YmwbyIJwzUh7PC884AaPyNINzi3HNoVvRd3iV2uNEECSZcgN733v0hP+MeShjrTD8cnkqbhi9f5YOdpcQLe5hK7Ldf9zhXXp+nkxn0IB8D0Nmk4QxxmSPAORbOiOEZdc0V8bxphWmYkcbaB+IAbo8vvTryilDz4ntrmRPuQitW3s75xPVPdUnxMhyQxe368QBgHm0wkAYDENEXoeNnGKOE0B2rWKurVeX+XqE9sUdDalO+Y2IuxDABT6UdQ256K9u6oeTKFsSo8GPScrXBRRAAvOIAt9oQ9E2eQOIstzIn2uME7a4nw1XbBpreEzgzkvTvJjPPOIsSjq+uhLXVOeApeXy3NxSSanKIHr36tfa8YLL2KqaaQ54/Z2icViDt/X8LSGniromBAnKUzDbeRYn+u1zolsU1hcO12yOXSY47HvCpeiTR78IJ4wUPa3I5nzCKdhQ1tsP+TVvUZofs+U4R6q+MCo5gsA1lGMOE0wn4bwPQ++70FrBT/RWMXJ9lDLmK3qhrLXH2t5u7BJ+Vuka8e8bqzK+urpVakortIdf92t+qohPAsQYZDYv2OtB+dO91LIcbkR4FqlEgTbkTYpCILgJvXNl+pvY0zpDtrfLqJsdHUnCKfQdyy6MdNU+naVuXmSfqvIJYJRCq/u7rFcbVDEXQPC0Md0Otn2GzJ/tAfp4+1GZGcz1PISmQrnoS0cxGOxgYFCyVJdV0RQxYkFAAQmhZQJr5drLJfrwt0zAdMwwFsvF5iG4daCWOp5P8h8S7CBoerpUP3Iwz7KDPI7wmUgY4yMQCNAJgLDce7JQZ+uXYTLIu3SfkSG/cMXOjsmsnSDS8qx/ttdrqGNMUiSBDmo1TLg0lwqH0PFQHIBV8pyTeU41Bc8+t7k8LXGbDaF72kAgGGDTRQjTjMo5ZXrgG2Otulc+hnLOHo8l5aV8HxEhm4gcnSDdjGOa0ysH4zrsghue8/7F0AAZtMJJr5flpuRZYxllCDN0sq9CIrDZKc9g0EsVKt0m+aQz03XMqs8m6x/q4OINjBmeRVHOMr0eNcW+spzfR+hl/w2PBX1bf3LzCDF9aNZ6MtudMix3K62e0YMIzEGn/z8F/HN25VYAI8FJyrXSKkUsvXX0Ig8BUEQBGG8tI3TzFy4g/b9B1bCY+ESCpwhflMUUePDFcuUU8vxmPXPg/dKI8kN7u6WiOIUhgGtNGbTEPPpBGzywiqIqfhvBIpfQNrcqZx77Sj0T9segMjSPkSGblCMh83XZWXZZQ3cnAOckgajsJlbriPcrVZI80LFpDThxWKCF7MQmg1G6bp3KD/QQu+MYFp5FGOY/x6imbu+1/1DpFdXKvfVh9ZD3xT5Hbfc6ojy93hEASxcJbJ4EITrwNXB+1ww6MFLEGylbaOn+Z6Z4XkeJp6GYjPK+cIY8/QUXCmHpSz4RwAAIABJREFUME5OrVvHbvpuT98rhbvVGsv1BnnOUKQQeD5eLGbwlAKXMf8AIMuy7XddOcRxbYgCyh1EjvYjMnSHS8uybaw/tF5oU+googe6U1IKWc54fb/Eap0AYGgCJoGHN17eYBoGQOl56Dl5fS5b619LGLM16XlgEEm/NwRqZIe+2xjyUPbWexFXfRlBVIZu4V06A4JwSYY4ld/nSRxBEITz8uAc5EVy0Yb0rW5waTk2f7/5XikFYww8r5gimyw/ex7HxDlkNRYLyafSrE+2Mza358/hKXXrgTXPgeehtUacpsiyHItZiEngA2wwn4eIohhRmgGgBy7amu3KhWd9bbT1jSJH+3Ct/75GHovpLthD29h4jjlJ29qk6QK6630TVaZVzT8UETZpgiRPMQ0nmPgeiIB5OEHgeVhHMdIsd66+2lQeu5TKdafF18kQz3UINedQbpqB/i2Lt+ulgdbkNrUxm/quUxEF8Ii49KbotXLMAt72zUlBuGakbz1MbesLY19QUGNRLdjJudtkc7Om6/eJCKa02qvcQXsA8tyAjSmW3COqd7ZvXLfFWLOxHHVcKQfgzty3WY6nWAZ3fa+IlaWQMXB7v8R8GmIahgAY01kIlWaIogTGtB86OZR2/XNhGPqu46IUtptDfYPI0T6kX7WfugyHPjjVNU947ODoA5ihCj+qMLX78hxYLiNsfIXFNICvPAS+B9/T2CQZoigurIGpCB+xH+O137LWxz6bFCjSdu1iiPlV73WgbKJDKFSrdPumrzSbRwmGal3SH4wHsecWBEHoQAYVdxBZHkKVL3lGgpu0LeoOuXOrNkaICIHWCLQCYece6VoYuqzSLwvnoo+6XI8NqJTavWdAo4gNvI5T3K42hcLXMELPw800hK8VALM9ZFIG2TpLvoVupA8SjkVcgAvCuBiyHbatGR5zA12fI2yvVS8iULncznODu1WE5XqzPRwWegpv3Mwwm0xg8hyF8pfKz3dp9l1mGQPtU07ZIjNr8glRigEErQhE9hz8vfb29VTEAlgQWjC1uF19nwYSBEEYC7ZuYUmfLPRB26n+ivrpeK01fADJlbuDPgcuWZ0C9vdVrpQDeNrGaduma9OCt+6VgpmRpilusxSL2RS+50EpwmI2xSZOkCTJ1hUcMz9LCXysTERZdVnEqtRNxI27vRyy1HdlDnItDOl14bHxv82LTfN7XesKBiPOc8T3S8wmE4QTH2DGdBpgEnhYRTHiNN2fLwxQtiGQ9jOkcmqQZK3ANov1IfPbF4R6v2SX62fhachhh5EhDWQcNCdrT3UXJ9iPyFJwCQbAUNuXTAOESzGWvvWQZXAd3/MQaAWNhxZAY1Cw9G2VdMkyuWRh5VI5XCnLU2ha9jT/rsf3IiIwCLfLNdZRXB60MpiFARazGRRzLzG2mhaJTRmNpY8VujkkP2F8HJKRyNJu6jITOdpN322xy9K3Oe63WQlv/0YRG3h7hYGcGUQKy02E1/dr5DmX1sLAzXyKm9kUVLqBHsICeC8/FnDdcxoGUbGLYwM2yaqt7T6HocaN+mHT/g3UjHUysyndMSE7
v4JwJLKwEwTBfmTYF4Qmj7l6U0qBmeF5HrTWBzd5Lk0fcxOZ3wjC8XRtyhARNnGCV6/vkWYGgIHnKdzczOBrPVg7ayoxBLuQtaY7iAzdQNqj/Ty3Xz1lnn/IVTSVSlcigiZVhIQAkBuD16s17lYbAMU9k8DHWy8WmHoeiEsFTV8WwL2k0pKuZYqZMa3fhMcZRF4DVoGx169i7cJWHQQRnofsBI8YmWSOH3HrJQj2cC1tkovzwq0vF7gWOV4Dl5TlY+4b60qcKiwEEcH3fQS+V0ygR7Yh2Mcm5RjLYzsubR67Uo7n0NzMVUptLYHqSmBSCqQUDAj3yzVWmwTGGCgizOchbmYTEAxQU9j2hcjJHeQA8ng41uqmzUuIyNFeRJbu8RT51dt/3SVzW5/wMBZww2tImwcRIiRZjtf3K6yjBMwEIoX5LMSLxQy+ptp8oUiVcdp436cr6cog1ab6X39Wfa9Bh0pX2Ke3+sYMBTus4IerW/WxzfSctjAGuPynqjlkqsjzwqiwaSAV2hnCVYtwOUSObnANcuQrOdt1DbK8BsYmx2Z+2tyqGmPAzIiTFGzBQv+UBePY5FHHhU0VF8oAuFOO59LWX3RdZ2b4WmM+m8L3yrh+YKxWG8SZgdYaeZ7vxfyT5yycgtQX+xEZ2o/I0B0OybJ5MKBr769tXt387vb/MsYwM8P3PCymE3habdOPsxzL5RooPRPlYGitgdxs83uMskgNUEdts9K1Kd0iyfGuzypsii2t+PhDXU/BinQ5H+wZWFH+AdMcC2wYaZ7j01/4Er5+u7qSXWJBOBNtkzlBEMaDywO8IAjP55g+goigtUbge49u7AhCHVfqhlg9FRzyHNB2PTMGt/dLbKIEDIDYYFHG+jMm34spCMhzFk5DLBLtR+RnJ3W5ifzc4Sne/h4LK9O8p+v+LC+sge9WG5jS0nbiabz58gazSQBFgAaBzb7ymYnAB5YyfeyEcOM1FDYpkYZKt0jzuvuUvp8rXffj3GLbvqht+R0PuzHIu3BOhA7qp7cEe2h2SiJDQRCG5FqsfQXhnLSdoG870a+UQuh7yLIMGXcrf2xh7HMWVywiXSkH4FZZnkpzzVa9b/YdSunCxRoR7tcbpFmO+WwCrQhBoPFCzRBFEZK82FIlUnvfF4RTeCzMgTBu2tq9yHC8nLoHVJ8vSh8/bo6RZSXHauxvWzt0zRWaaZgyjcoiOE5zpNkK82mAwPMBBUzDAL6nECVZ4Y0ItTpYKoFb84rn9SNdT0L6pmEoHqu45u0N3sVAtc36t680H3Nf3wfSH4yLujRk51gQBOEIZCBzB5GlG4gc3WCscmyezq9vzlQwF67XtNZ7blvHxjEWRWPMdxuuWEdVbsQFN3jMuqfoO3axxLXWiJIEr+5WSNIMYMDTCov5DGHoQykCc96atiA8BYlbaj8iQ3eo5GeMKHds51Bc2QexgDvebw8EgOCRKmKTUhUrmAFSuF1u8Hq5RJqa0kW0wmI6wYv5FJ7WRWDgSgHd0TcMopiq/ui5O7JNOTVMuvb08bbNVW2oBzLO77BBXmOHWUyHBEEQjuaaBghhfDDowUsQhPPQ1v+rMgaX1hqB1qBSwVNRdwc4hgXcGPIg7OOCTMZSvy/NISVw1U/U+wSlFMDA7f0ay/Vmez0MAiymU3ikADZbX3XyjIW+edh2TeMldW7siDLYfkSG7tM1P3igPKZdr0vl51opAAztaRgmLFcb3K02hbcQIviexsvZFDezsNgZqOoRMxiAQaEY3ov7y+XU4kCVa7p5bt66V1+5H9fSgnAOhlTSjr8fL1SARHL46CqojSlgFhfQgiAIgmAPsrwShHPRdOF2yMWfUoTA9xGnGdBwATumw0NtZRn/YvUhtrsebj5/m8tS4Uo5nsMhd9CV0nfPPTQYBMImTpCkKW7mM3i+t7UGjpIEURwDpC9VJOEK2NXZ7riUzfuvva2PEXEbbT8iQ7dozgfq1zrlStV/+56HCFT6cAaSNEOWZZj4PqbhBJoIge/h5c0cUZIiSRIY3v3uKTXolBVB5VK675iqtln7SezfYej9uVLZjvpMcuD+uff4x1TrTwbApvHKprw+h3ovIhbAI+ZaKqQgCIJQsDtlS2CovZcofwVXGTIOzXNpbnQfcu9GRJj4xdnK6uR+PZ0xKVrHlJen4kIZKlwpy9jq+SU4xQUkEYEBKO0hN8Dd3RrrTVTG8wNm4QQ3s9nODWTJtT9jYXja3A0/qrgQRkdTdtJ32EdXWxRZjp9mPOAuz0D1OcEDt9ANBS4zQ3kahhTWcYLX9ytEaVbMJQiYhwHeuJnD931wnoO4JdY0AVy90G3lewhVL1uZVh/YNr70n9+nSONyDLWG7z3Nyup9YPrM91DPVg3UxIaqB1bUrxEjMYAt4poqpiAIgkCAKHuPRsZIYWja6lhXvSMiKKUw8TQI+5utY62rtm8g2p5/V7l2uTQ3cKtrzXuq/40p3MezIqw3CV7d3iPPi2fo+x5u5lNMPA/ERqwvhYsiiid7GVtYCuH5iCzt5JBHoa77VakIJgAmy4EqtASA5XqD2/slsny35lhMJ3h5M4evAOYcfSsUbZuH2KREKpKVdt0XxMXummqZmz+X+jq/f2vd/j2Jievn64UhCmBBEISTsG2yK7QzXjmONV/jZbyyFFyiy4qv7bS+UgqB7x/v7u1CuLJpaIxxoiyykesOXdaSTWsgoNiQqlvSEBEMA6+X91hvIgCAVoTZLMQ0nMAr3AucsTSC8BCxRLQfsSy1mzaLUpHdOGmuIR5bF3TJslAEK2ilUAWSKEwbCZkBbpdL3G9iZIahiDHxNF4s5phNwr0YwIfsSw0Ac6HlytjWScJhbJHXULkc+kBmn547mAv/grt+Y/zY5LJ9rDSt3skYY4f0rxyZzAnCeJD26A6XlCXLGazekDbpBrbIcRePq9xEaWzUVH8bY5CmKQxJWz8XLi3qXCmLK+Xog2Yf19ZvtL1/88UCWhEAA2bCeh0hzQxY7ZTJBGzjj9eR5y9cGqmDdiPycwOR43hoW+8ce63tc25cY2bMplNMJ16522BgQFhHCZIk3aqBqnlDPa22emKwsxxTA9Yju6x0e0qXUdNQGqva6eifbYmuqYDHnOdmG+wvTQZVIcRHXP5zpGtT++qLOM3xu+98CV979052nwVBEARBEAThWNriebW5elVKwS8tgW1RbtuOS8/ZpbIIp1PvZ27vl9jEKQAFIsJ8PsVsGgBswGy2sYQra4Rr3OAQxov0ZXYj1sFuIDIcD13hZY4Zux+TYxWOZr3e4G65QZJmYCgoEBbhBDezGXxNe8rfQ/kCxG3ooMh07WzI3NguRF794106A4IgCLYhm/nCsXTXEpnQCEITW/rWusunNvdP9WtKKQQAUmbkj3xP6Iexutx+Ci6UxYUy9EWXW/hmf7D3ORFyZizXa+RZgOl0Aq0UgsCH9jQ2UYwky0Glp4F6HyrPXBgLh5QVgl0cmqeJPO2gy72wcD661jz164fcRjc9iDTnE0prZHmO+3WOwPcQBj48reB7Cp43Q5Jk2CQJcnP8HG2
oGmJb3Rsmv3ZZ/w5F7xafA6U7FMPkk0DEoIFasE2W+9eOHOaxBGkAgiAINqI6XtKn94mMkcIleCwucHVCX2sN39NQtP9dQbgmbDjccS7a+o7Oz0v3zlp72MQJ7u6WSLMcRAStFBbzGeZhCDYMYwyUUtK/CNYgFomCIFwrXVa/bdcPrTmamHI+ABSHyOIkxe39ElG8c/8cTny8XMwxmwRQdJx1uG1zC3vya9cYOJSnmSHSVBjWK84Qz+K5aTIKt+0ViiDK3wHTtQmxABYEQXgCtliqCYcROQqCcCrNDZiuuF11RXDlDjrLTbkpIwyNWJ6OD7F836fNAvhBDN/yBcMAFHIAt/crhEGA2XQCpRiTwANpjShOkGXZOYsgCEfR1R+7pnC4dsSy1F7EsvtyHLIGBmqxfjtkpJRq9LHVfQxmAkiBoXAfp1jHCWahj1kYgpgxDXwEvodNmiBKUoAflzcxwFIlemX3zOsRl4VnsRdfuX9s6BeLvQhgiEMGNpRf2EcUwIIgCM9ANjOFCpbJ+sWR9mg/Nh3KqHJZt/at3gP7G97MDE0EUoTMMLIjNliEfnChX3DJra8o5h/SdPm49zd2fY3SVH0BmyRBmuWYz0IEgUagAX8aIIqBKM22Vj4VLrSDU3Gp3dhO1/N/TC4iQ/sR99/2I4r94Wlz93zonua99bkDE213JXafA2CGAXC/jrFJcszDCXxPQRHjJpwg1B5WcYw8Z3A5/6hmIcUv6yK9HmVvm7Vf/+k2XT+Pez9p1O2ea4cm0W9e2+Yifcyrh0i30PcyGNjzPmYDNlkU28CuDy8Yd+8i7HHNFVcQxoq0y2uF8NCts3BppD26gY1ybHPFVv1dv661hu/7UErt3SMI14YtBz3ORb2f6Pq7ovIqkJkct/crrNcxmAsroNk0xGI2hU8AszloXewyzbJWh3TE5bC9iAzdQuRoN9Ieh+OxEDPNe5v3qcc+VwpZluH1/RJ3qwi5KVQEvu/h5WKOxSyEJgYZA0UKmjSqVG04qGuP8leo6OXZdiQx9voK9OtKer+84y+7cD7EAlgQBEG4ak5ZyJRnaIfMjiAIltJmxdd1zdcKWZYjN82T38IQuGR16pIlp0tyGZr9vqS4Vrh9BNZxgiRNMJuFCDwfvlbQ8xCbKEOWpdt+xoZNsHMhFm32IzJ0A5GjG3R5sRD6pzmeq8oTUe1zoCmT4v8qDI3WHowxiJMUWZYhDAJMJz6UIgSehj+fI04zbOIIDDWIXKV+yDMAerbUrebH2D/40Bf18E59pNtsy/3l1fRqqd9E6q2diAJYEARBEEoen8yJpe9YuUYLJ2F8dLlyrd4DRR31tIYiQpLlMFJnz4Yrm5KulKPCJaX2c+naZN0/UPJwvEtzxt1yg+nEYDoJoJTCfBogSRXiOEF2RYdNnjoPEEWU/Yi7YTeQtmgvddlJe3weh9a2bfMDVC6ga/fsvrs7QFZYbRsQMbRWAAibOEGcZZhOAoSBDwIjDDz4/hxxkiJNUzBoq0SuPBk9t2y2YFt++8am8jMAQ4Du0WhjOEXtAM+WdmsqGsBwpe/8Vv2XuH8eHtnJtgypwIIwHqQ9uoPIUhCEIai7bKsrdaoT+J7niWXeGXHtObtWHqGgbU5Sdwldv1a8rzZ9FdZRjLvlClle9DGB72Exn2Hi+6Cy33GZvtuEuDd1A5GhfTTbnsjQHaRfPY3HlCPNmKf1O5tzh720iAEU8wJSChkz1lGCd1/fIs9zAAaKCNNJgJvFDFor5HkOrcSOrA9k/8mumK+2ycu2/ArDQ8YYGXktQyZLgjAupE26wYN4cXJGykqkPbqB7XKs8t/cvKwre6v/jTHI8xwZ48G9wnC48nxdKQfgVln64jEFSNu1yjpn6itMp9Ptc82yHOsoRZqnpfUOFXu/apdWn67tLsU5xw+bn5Mg8nMFkaMbiByP49i5QN0tdNs9be8JBEahoJ8EPmaBD9/3UBw0I2SZQZRkSPMMQE1mDABUap9365xDB9r6xp50eUjvvIMwZNvsxZVyqdZSSvXrUpp36fadZtvBzn7SBdRA4hKFvV0wgDTN8Jl3voyvvXsnLqAFQRAEoYndqidBsB/brWKbrtnq15oLEqUUlFIwWY48z/dO59uuiBkzrrhRHsol2SVwRSZ90tUXPNY/GmOwiQ2SbIXFfAZPK3iexs1cI44VNnEMJgUiBWZ3LIPPPW641P6uEXE17AbiatgNRI7H0eYauvPaI7GBK7ZuWMHbeMGF6+cMwSTAPAygwNCasJj5RXiJJEVmDHY2xwxw/4ot4bKMXfnbZzpt6fY9rxwinvZe+r2naB/S97Qj5k2CIAiCUMIAGASZOtmLTPgEm6hbA/uK4Lcc2bVZES4IwjAc2mCtLCAMEdLc4PXdEqt1VG7qApOJj5vFHJ4iGN4dOnHB+veSVG5N6y/BPkR+7iCydAORYzttrqHb3lc7G01X0W0YVAfCCEQKIIUoSvCNV3fYxOn2u0V4iSmmQbCNK1yfR5xbVmJFaB/ybAt6ayuEwq27PFahA7EAthDbrWIEQRDGSHFKtlIAC4IgPJ+uxW3byd9q48TkZhsjWBbHw+OS1akrZXGlHH3S5VWgrrBtupcnFHH9mBmbOEGS5biZT+FpDa0MFvMp4jhGlBonFL9jXB8382T7M742xCLRHcTa2w1Ejg9p7g+3WfbWLhTOnFu+w8zQWiPPiiPxSikYY0AEEGmsohibOEY4mWA28UFECCc+At9DFMdIcrO1Hj6UV1sYVqk8vvnKORFXwv2nTzRsnbJJZkI7YgFsKdJQBGE8SHt0Dca1T8oFYQy40re2ndBvXq8rcnzPg1YKqClzxPJheA49X3n256UthrZQ0KZQrPcjzdd285cImWG8urvHOo4AAIoY0zDAfDaF1mp78KTtd4TTaeu326yE5VnbQTMet8jQfkSObiD9avda49B99blC9T4zZWgaIrAxIOJSAQwwEXImrDYR7lYrZKXCVynCbBpiPpvC9xTAZi/+cCUPAl35FgvDtn2ma1T6DdV/1NM9tr0+hSFSHSKusq0Ke5sRBbAgCIIgbGFQ+RLsRSZ+wthpbrgAO7etCgxfK3haP7hXGBZXNgxt3/yU+n46bd4EmtfBRay+1TrG7f0KcZoBUPAV8GIWYj6diPeBHjml77a5vV4Lj8nS9n5XKBA5usM1yrLVBXTzGgDVcA1d3aMr9RERSCkQaVQqJQVAUbFeSTLGu3dL3JaKYADwFWExDbGYh1C0r4jXWhdKZEv2WIZRfBYvW7BpHtg8zNB32mNOs37AggZQ/w6h/BUug7iAFgRBEATBOSRcghu4JMd6WR5zt1pZApsse6CQYS7csrnyXMZGl3tu4TKIO+h9DvWJTVfQ9XubG2NpbpCvIiSBwWI6ARFh4vvQC4VNFCMz4rr43Ii7aDc4NDaLTO1BXA27w7X1rc25QP1a/fohV9GPrTGqOUWS5nidrREEPmZhAK0ImhRezGdI0hxxmiA3XKxlahrQrrw9taxCv1hjnVmFvL3SarBtRwQUhizjfhDSXi+LKIAFQRB6wCUlxT
XTtmASBEHoi7aT+XXqGzDGGEw8jSxjpCavviHjzZlwLSaqK2WxvRznoKn4BfbrQhHTj2AAbOIUeZpiMZ/D8xQ8rXCzmCGKEsRJAkaxYTv29uBin3htSotrwKU++RrpUgrL+GQX1xLbu2tfo1VB/PDLB+cR1XtWCsxAFCdIkgTTMCziAwOYBBq+FyLNDOI0gym9kPCBNN1k3POnJtYof1GcJ1Do3/K31YtOT+n2/hwqN+0jt/4dOl2b2tilEBfQgiAIPSGDjiAIgnAMXeNFW2wuz/PgeXJm8xJcx8aUYBvHbCB1HTapPAgQEcAMRUAGwrt3d1htojLQXxEb+GY+g6cIMPmo57jX0k6vPcala4gc3aAZG1qwF1fb46E1R9c91HatJR3FAAwDzGADLNcbfP31PeIsR3VoNfA1bmYhwkBDlUn0cWjCHuWUXXF/bUINaPM65nlvRWVUP3bLX2EcyG6SxYgFiCAIgiAIruPqfKdSwFQn4evuodG45mkNMCPLyzg/pNDcTBi7hZ6tuGLZ41I5bC9DXzzWN7a5hC62dYv3qvI2wAylNNZRjDTPMZ1MEAY+tFZYzGdIkhRRHCOvxwbEThZjqVvXWDfE3bAbiBzd4ZDr6LH0lcKOLnfIbdgst2OsgR+4hC5u2PseUWnBu7sRIEChCE2jSIEBvL5fwvc8zMIJJoEPAiMMAngeI01zpGkGw7w9dGbzs21j94yKv/ae2cixbi7VU16H6p+HSHeXJrZxvPvGngMWwrGIAthyXN0UFQRBEITnImOkMHqIQDUlcEVTCUwM+NoDYJAbs3Xhup/UgQVVkoD/6Jvgb7yG+cYrmG+8C3z9XZjXt+AoBdIUSDNwloLSHEhTcJoBSVp8fxqCwgkQTkBhAIQhMPFBk6D4bBIA4QTqjReg978N9b63QR98X88PSxAKZBN9xzFuFPc3eAGgHBvL62r7fUKa5siyNfLMxzQMQUQIwwBaF7GBU95ZSTVd5F1601Dqwz7iQtoNJBat/chaZLyc0pZc6FOPdQu9Hd9RHBLbO+xV+w6XF6o5RBGKlAFSyDKDu+UaE9/HbBrA9zQ0AXriw9MaUZIiz3MorR9YX5/q5aQPhpHn7tCdDdik9KMBXD9v07YoXZusf216ri4iCmBBEARBaFAscgwkUoIgCINTnn7fv7S/kKkUK54qfD0Z03GWfB3BfPHfgL/8VeRf+irMl74K/tdfAa82R2endZtiE4E3UffnXbz9JvT73wZ9y9tQH/wWqG95G/SB94Le/zbg+6ekdHEurdzqC1GeCsDDA1LN+rCOM0TxEovZFEHgQVexgeMU6zgGqf35kSvtw2VERu4gsrQbiQUtXJquQ9Jt15sei4AWb0WVKaIpvqNA4MKPNOIsQ3QbIwxD3MxCKAC+r+F5CrnJsdqkB/NkN8XBO9dKNQZssnrtmyKffNXur4XTIVPsIAkW494gKQj2Iu3RHbbuWAdzrCKcA2mT7uCyLNvK1nQJXb/HGIPo9S3Mpz+H/LN/UCh6v/w18Ou782S4B+g7Pgj9x7+7fH0P8PLm0lk6CpcWxVIWNzmmrzwUt7Ju4avA8AMf82lhDUwA8txgFUXIMlN4Mag8FVxABi6PC+dC2o47iCzdQOToBjbJsWsd0hzbTct8oXGhONO63UcpnSCXVsKKGOEkwHQSQikqLYyBNM0Qpyn2bYy76fvZDiUrBd5aSY8dW6wzFe/SHXuem+2n3/yabSiXPhn7Mz1Xui7AKPrWz7zzZXzt3TtRALuCLH4FYTxIe3QHZhYFsANIm3SDq5Aj8wNXa8VlBuIE6Wf+JeLf+T2kn/os8ne+fMGM9g+9/23oj3wvvD/5Eag/8f3AJLh0ljpxZbHpSjkAt8ryFNri9x3zna6/iQjGFKY8pBSIGS8Wc3h65/Z5k2SI4mTrkl4UwO5w7e3JNh47gCHytB+Rof3YIMOuMbX1oNiDz2uRb5l32l9UfxaWwibPoQhYzKYIJwEI5ZyDGXGcIs0z8AEvbPa4fwY0uDCKpvFHAbbluWrshx/pm75j9Q6lACYYEDH69lhow3MdMk2XEAWwo8jiVxDGg7RHdxAFsBtIm3SDq5BjFQOrVACnv/t5JP/iM0g++fvIfv8Ll87dWVE/+GF4H/0I9I/+MOh977l0dlpxZeHpSjkAt8pyCm0KoMf6zEMWP8DOHWPxtpgNTfwAs9kEWhW/lRvGeh0hzXMwKVCLo8OhZHIVY8JIuNZ2ZQunWOCLLO1G5OcOY5V5+UL4AAAgAElEQVTlAxfQKBW4Hda/1b8PhmQGuKZu4OofApgNiAHladzMJvA9D9XjYMOI0gxREoNIlzFOCaD9A7JKqV7mAcPIoWHLLArg3uhTAdzmjn8IBXBfnnJyMBSX1s8wZZuxwxrelvrlEgwgSTP87he+gq+9uhUFsCvIAlgQxoO0R3cQBbAbSJt0h2uQZfalr2Lzi7+K6J/9Bsw3Xl06O6NAfejboX/sY/D+7MdA73nj0tnZw5XFp5TDXU5VBB98z4BWwDQMi9jA5QZsHKeI0hS5eegyskmfrvWEcSDtzl5Edm4gcnSHMcjy0Ph6aI7w0DK49sf2zU6Jy1R8Fvga0zDEJPDKeMMMY4A4TZFlOUyl9LJGicQYgRiPxp7nCnjUn8VrUwE8hOvnPp+D4XJnUvVt91tgk/J3yHRdIk4z/O4X/g2+9uq1KIBdQRbAgjA+pF26gbElcItwEGmPbuCqHM2rW0S/9OvY/JP/D9kfuOXauW/Uh78L3o/9CLyPfxR4sbh0dra4sgiVcrjJqS6hu75T7OEaAAZhEGAWTuF5hRLYGIP7TYI8zwFg60b6GFmcKi9XxwIXkLbnBiJH+xEZusMlZXmMW+hK4VVXALfOKUz5d/169S8b+L7GzWwGrQlsGEorZFmOJE2Q5jsFXR/WlBX9PtsqX/bMUWxQ/vJW8am2FsBD0PcBRaX6VdMW9b6Iqd33U7BJSStj23HsLIBFAewUsggWhPEh7dINRAHsBtIe3cEVWXKcIP6V38L6H/0q0k9/9jy/6XlQb78Bevst0HveLP72AyDwQJ4HBB7Y80CBD3geyPcKLwibDbCJwVEMRDE4jsHrqHi/Kf7ndQR+dQuc0WpZ/eCH4f+FPw39p34YCCdn+902XFmMSjnGRZ+bnM9VAlfuoesbsIoIszAo4vgRgUFI0wxRFCHJzcmbX8eW1ZVxwGVcaYPXjMjQDUSObnEJeZ4SGxhApxK4olJDFCFMNfI8BVC6dTY5JoGHF4tF4fa58BmNNDdIkhSZyQGokSqAjzv0NiasUNBxYfE6lBV4xdgVwCjdPtMAe5OiAHaPugL4D1+9BnHBpfMl9ITIUhDGg7RHNxAFsDtIm3QD2+XI9yus/q9fxPrnfwl8vxzkN+iNF6Dv/CDUd30b9Ie+A/Set6DefgN4eTPI79VhZmC5Br+6A3/zFcw7X0L++S+Cv/BF8Ku7wX5X/7kfhf9X/iLUt71/sN94DNsXpEO5LLsUtpehLTZZ3+kee1/b32W4csAY+IGP2XQKT5WWv8yIogRxkoCPcNdXjztcp
6vcto8DrtM8uGB7WxR2iCztR2ToFueU59GKYHTNIXbzB4BADOS8U5pWXkMMGArAbDLBLAxK608CA6VFcIrMMOiZ7oD7f3Z2KYBtUM5V8wldug7vX6laYIMClAaqXzaU/RzpukarAhiQRZQriBwFYVxIm7SfvcXLINE2hHMh7dENbJWj+aN3sfzZf4DNL/4KkGa9pas+8F743/td8D707dB/7Fuhv+vbkYR+uYmiAfRrRXiIR2VztyyUwe98Cfnn/zX4C18Gv+5XKaz+xPcj+MlPQP3gh3tN9xRcWZhKOS5Lsz0Nsal26j2PvZ9NAkzDCQo3iIwsy7De5EhNBqDYvFVQAIoYf3VslZPwdETmbiBydAORoxucS47tYSIezhH4wOdAofQtPqvyrsCcb+8nIoThBDehv+eBJMtzRGkOYx6ucboOlVUMp5iyZ41qi3KOuDDF6Nv6t6pbQxyw7FNJva3LYCg1fpkN8VzryDh1PAyDNDX45Be+gq+/vhMFsGuIHAVhXEibtB9RALuDtEc3sE2O2R98Bcu/8/OIf/k3e0mPJgH8H/owgo/9IPyP/QD0+97eflY9mxyMNE3LTZHz9FtPlQu/voP53DvIf+vTyH/jk0AU95If+s4PIvgrfwn64x/tJb2TftuhxakrZbGtHF3t6dKWwM33D9MwUKQwn04RBB4IDGbCJo4QJSmYCVRc3fuWbfIRhkPqghuIHO1G5Oce5zpE1mYNjMdiA2//3o8nDOysgj1NCGsWwdXnSZohzXLktWSr74sCuBtbFHTEgBrQO9FQFst9u5O2RQE8ZJpDpusiewrgV7eiAHYRkaUgjAdpj26wi2cjCmDbkTbpBjbIMfvcO1j+7f8b8W9+6tlp6W99H4If+SFMPvoDCD72kfYT9vVrBOSGkWfFyXiD4S2A+5KJ+fTnkP/mp5D/5qfA33z97PTUd30bgv/0J6F++Pt7yN1puLJIdaUcgB1lOaYtDakIbts0PVYJbKqNLzYIJyGm4QSaCteNWZ5jtYmRZTm42sw78JuCANjRZoXDiAzdQOToDudQBj8lPnA9xERTEVyFndCKMAsnCAMfVTFyw0jTDFluYPD4nKL/ulxZHPec7EDYpJwjAHrAw8xjjylc1OVCAWyLotam+uUynQpgwI7NNOFxRI6CMC6kTbpB4cJIFMC2I+3RHcYqy/wrf4j7/+lnEf/67zwrHf2+txH+hT+D6Y//Wehvfd/2eqXsrbtBq/9f/9sYgzzPkecMHujUcPO3+4KIYN75MvLf+B1k//xT4C9/9Vnpqe/7ECY//VOg7/1j/WTwSFxarLpQFhvKcGx7uoRL6Pp9rZu7RDB5DqXUNlbbzSxE4HvbfitOMqzjqHT7SCBSogAWTkLqiv2IDO1HZOgWfSmrjr12yDV08Z627qGbMYIJDE9rzMJJ6W0E5eeMJM0QZylQ7ts0yzVE3N9h0h0Om5R+CrR1AT0EvVss1w4tPC/tog1Ufw+1jLelLtjUvsaCKICvAJGjIIwLaZNuIApgN5D26A5jkyW/vsP9//r3sPmFX35yGnSzQPhv/yimP/5n4H/fd7f/TteJ+gNuWbMsR5Lngy2ehlIA7/3G176B7Fd+C9k//tVnWQbrj34E/k//FNS3vv+5WTwK2xeszU0M28sDjL8Mp7SnMcQFrl8jBgx4qwxmZigi+Ipws5ht46LlhhFFMeKscAsNIoxbKsKl6NpIHXs7Fh5HZGgX0havg+fKs2npW3fXvL2ndm/T0re6Xl0rXoV1bXVL/fPA01jMQvie3n4/LxXBSZahOGi2K9O1K4BtUaRWVArgoX5jvM/DYHuIAWb0Za+PD2PP67UgCuArQOQoCONC2qQ7GDDAogS2GWmPbjEGeXIUY/V3fwGr//0fAmn2pDQmP/YxzP69P4fgR37w+N9tuH0m2p2Ur99TbZCkaYo8z8GkijPFA53075u2fJrf+1dI/+lvIP+13wbi5Enp6j//pxH8tZ8AveeN52bxaFxYvLpQhooxluUpbWqIcjxFEdzmnaDeBykFhGGISRDAIwAMpFmKVZQg50J5XO3yERswyB5/isJoGGO7Fo5H5OcOIks3OFWOh+YPB90+l+9z3h2531cEN79TTCSq708mAeZhAE+r8hMgz3PEaYbcMJgJpAotMoFBIHAv1pps1VTFJuWcRr+xdIc4CFDVn+qAY1/kbKBAUANNhcer+N7Rdzzla4MBpGkmCmCXETkKwviQdukGogB2A2mP7nBpWW7+4T/D/f/y98B39yd/l16+wOw/+POY/8S/A3r54km/32X12+WiNU1TZIa3K8n66fzn/v7QtC7+ohjZP/41JP/gnwJff/dJ6Xo/8eMI/tp/CATBM3N4HK4sYl0pBzCusjynTY1JEVxn/2CKge/7WEyn8PTO9fNqFSHN8sJ6uLpVrIKFnhhTGxeehsjQDUSO7nCsLLvmEa2HyGp/78f/rd53Hz5jZhCA6WSC+SyEUrv1TZbliJMUplQomt2XTypLSynK7z/x6xfABgUwMaB69jw0lAJ4qPSq+tyzXnmLDQrgIdO8BkQBfCWILAVhXEibdAMGg0UBbD3SHt3hUrLM/uAruP2v/0dk73z55O963/0dmP/kJxD+ux/vLT/1GFlNy+Dq82rxlOR5YQm81f4+Xcly7ud/aAGY/9ankf79fwLzmX91esLvfRPh3/hpqI98zzNydzyuLGSlHP3jggK4ee3hBi5DwSCcTDCbTQtrHFLIsgxRnCDJDQwzFCkAMl4L/TGmti48D5Gl/YgM3eExWR6jBG5zE92mAO76Xn0NpIgQBh5m0xC6Fjw1y3KkWY7U7HtQaioHj0esf4dIV3H/oWeGVgD3nSbDFArgkefT5nRd56ACeHuTbIw6gchREMaDtEd3YCbZjnQAaZNucG45cpxg+bd+Duv/4/85+buTj38U87/8Cfg/8OH+89XierX+f/NabnJkeeFi9ambHpdqQ4/l1Xz+S0j+zs/D/IvfOzlt7xMfR/AzfxWYDGsN7NpC1oXyXLoMfbanSymC2+7bbsYqhXwvDnnhztFTGvPZBIHvlfcDcZoiitPiYIsDdUsYN5du+0K/iDztRuTnBl1K1WPdQ1d/m5a4wM176knWD7yyMdBaYRqGCAMPWqvtzWmWF8pg8xwLYMZOATz+emuNco6LyLdUHVAeqQK4zXV4n8pqRc+1Tu/mmtO8JkQBfEWIHAVhPEh7dAfmrbOfC+dEeA5NhZlgL+fqX9NPfRav/5u/CXOiq+HJn/oh3Pzn/wn0t75voJwVdLlhbf5fLaazLEOS5Q/iAT8WC2sM49kx7dZ8/ktIf/bvI//t3z0t7bdeYvI3/jrUD33fU7N3/G9Z3P8064ntZbl0OZ7brs4lj6cogpuxgItrBkQKAAFsEHgeFvNwG0MtZ4NNlCBJ81qsv3IfkGtu7Ecwjrc9k0vnSejm2DojMrQfkaE7iCzdoT4XaPLAg9He+7Y4wNUco6oju7/rn2sizMIA0zDYzZNASLIccZYjy3OoZqzR6seo9mftEFvxvvAQZ0PAClsUdFRTAPep
/AWKOL19zRvr8+7nWZG3p1kpgIdgCKtqW+rXNSEK4CtC5CgI40LapBsU8WkIogC2G1EAu8PQfSvfr3D33/9tRL/06yd9z/uOD+LFf/HT8P/4edwKA0WvZGobHhVdMYHz0h10DgK/+xr8zVdAmuFQ/8YA6I0b0Ae+pe/sH80p7dZ87h0kf/NnYU501+194uMI/rO/OnhsYFf6ICnH8+i7HxuyHH24hW6+Z2ZoVcTum0wmUFRtXOTYRBEyA5Q93J5FyBjG8UPP49J5E56HyM8tRJ52I/Jzg6a3oja6XENzy7X6+zYFcf29rxXCIMB8OilDexmAFNIsRZoaZIZBSoFQWBAToZxvFHk2pXvpuuvn3RG18WKN9W+JR/2GXKs80fSd5hBuqqv0hlAADxKnuHlwokekz38eogC+MkSWgjAepD26gSiA3UDao1sMJc/4134bt//t/wxero7+jnrzJW5+5j9C+Il/a5A8PUar++c4Qf61r8N88zXyP/oGsj/8Jsw33i3fvwv+xmlWzVsWM9Bbb4De8yboPW9Avf1m8f69b4G+/QOg+bSPInVyysIw/+XfRPK3fg786vb4H3jvmwj/y5+B+r4PPSF3x+PSAlfKcjpDj0fnsgbuUsYejPFXe1/FMp/4HqbTEL6nURn8buIYcZIWbuuLLw6yoXcqz5GdS23l2hDZuYPI0h1ElvZxijVw/Zrp/M7D7+6ti7hQ3GpSmM+n8H0NIkCV+zppmiPOMhhuV1Tv6ljN1TTGvytkk4JOEW3l0Te9xugd2PrVBgXwkOlKf/58jlIAA7Ix6goiR0EYF9Im3aAIFyOTEtuR9ugOQ8jy7n/437D5uX909P00DbH4j/99TH/qL4ICv/f8nELymX+J9HPvIP29zyP+1O8Dq81lMvLyBurbPwD6jg/A+9B3gj7yPaA3X/T6EyctEJMUyd/9BWT/5/970m94P/kJBH/9L5+Ys9NwaaHrSllEAXwcz4kPXP+7qUCe+D4WsymUKjYxDBvcLzfI8mLj9WFc4fPzXNm50lauHZGj/YgM3UFkaTddsX6bNOMDN79bzSuAykUvgTmHIUCDYLIcnq+xmIYIAr86XgYQIU1zrOMEQOE2mIi2h9SK9I1V9WzUCrrS5XOVZl/ulIGHdWkI1899sUu3NDUZs8wGTnPIdK8JUQBfGSJHQRgX0ibdwIABrlsBKwDmgjkSnoq0STfoU47m1S1e/1f/HdLPfv7o7wR/8iN4+f+z9+Zxkhvl/f+nSld3z8zOHl7b6xN71/jCNvhrG4NtCOFwEgIE8gPClZAESMg34QjkgGAuxw4JBALG8E2IwxXCEbABY8DmML7wgTH42F2vvbu+vffMztXduqp+f+hodbekVqulHklTb16Nd6RSVUmPSirVp56n3vlG0DWrM6tHUvjCIoxf3w9jy4Mw7t8Ba8ejY6/DMJC1q0FOPA7yKSeAnnUqyOrp0fMc8iOR79qH9uVfBt/+SOJj6MmboL3rT0EmG8NWLzFV+tityrmM4zzG9R5a7nWBw9JGeekQQvwBsEZNg6YpINxZa88wbbR1HaYrBJdZAA6jKm1npSPsWH6EDauDsGV56J0Q1iXo9qQbdKy3PnBvej8Nd8Q2KpGAEAwn2gjnsCwThmmDIdjXYL6HKi/BbVV0gY4GzJiHWJtlnsF7KI+Q0gAHJeURaot+b610hAC8whB2FAiKh2iX5ccRgOH0+v11YYRdy4hoj9UhC1ua9+/A7D9+Bnx+IVF6MjWJ6be8Btpzzx657KTwtg7jvm3Qf30/zHvuh/XwcGvbFg3ylCNBzzgF8v85BfSE40bLa8gPRvu6m6H/z3eBtp7sgPVrUH/PW0GOOCxF7ZJTlQ/fqpwHkM+5jPv9UwQxOy6kY+igLSEAZ6gpChqNmj8wxjjQauswTBOcez0w4nTJ3EHiLL1IkpzHyHkG/h1W4yq1p5WKsGGxSfq8EHasBsKOxaZvSZuIkMxxIaQ7+9x0zhoSgbwAxmxQSgAOqLKERl2Dosj+e5hzwDBN6JYNEAIaGPbh6AjJwX6HV7flIO/QxFnnSbjfg+uq96jX0Ts+S6E2LwGYww1PTpFp+Ouy3Qt557vSEALwCkTYUiAoFqJNVoPOLNTlXX9OMBqiPVaHUW3ZvOo6LHz+m4nTa+efhek/fw3IqqmRyk0Cb7bRvvEOtK6/DebW7bmXt2xMTUJ6xsmQzjoN0uknAZo61OFpPhj5zEHol38ZbHPC66oq0N7+Rkj/52lDlzUMVfn4FecRzXK8f4oU1jrOG7j3b8YYKKWoazIa9bo/PGbbNpaabRi2DSrJsC0Lsiz7g15h4nIW5C0Ah+HVviptaqUi7FcdhC3Lj7Bh8fGF3ARhoXuPi9oW3OWvK8wASSLgzIYiS5hsNKDIzjgPIQQ2Z7AsG6ZhwQaHJEmhy1cUhbKIc5xzUBDQnCK65BOmOePwz7DdyDckl0XmynIv5JXnSiWxAAyIQdGqIOwoEBQL0SargRCAq4Foj9UhrS15W8fcxz4H/Y57EqUnq6Yw/Y4/gnbW6anKGwbzvm1oXncz9Ft/Be6uRbWSkJ55BuTnnAN65qmJj0n74Wh958cwvnp14vTq618G+Xd/M1VZSanKR7A4j3CW8/2T52BQ0Otm0MDoMCKw929Zkpyw0Krs79MNC622DhYoM08vnCxtN0pOea0TJxgvwobVQdiyOghbFgO/PxGxvffvXrtFh4ru3Qgw3lnSixICRZFQ1zTUVLmrb2NaFgzbdiOVAHCFuyKJwWUR5wh3LmHWIZX9/DMWgHPxqCXu+tI5CMBC/F25DCUAA2JgtCoIOwoExUK0yWrgiMBCAC47oj1WgzR25AuLmPmHjycOo6yddyam//KPQCbqQ5eVuE4H59H84Y1o/eTnsPfsz62cMkEm6qAXnAXlt54Lctghg9On/IBkOx9D+xNXAPtmE6WXLjgb2v99faqyhqEqH8RVOI+szqEI752iegOHDeoGB4AJIeCMoa6paNQ1SO6gIeMczXYbpsmcVToCQnBRxPtxWF2IwuVH2K9aCHtWA2HH5aerrxCyvfd9P8hD1xOCubsWMAjxl/oioGDchkwIFC80tCz7JTNwmIYNi9nuUhTFEX+Bkgh0HJAyFlTz8NLtnVyQeRhssFzyzSvPvPItUvupAkIAXqEIOwoExUK0yWrgdPZFR6XsiPZYDYa1I5uZw8x7/gX2rn2J0k+98ffReMWFaaqWrD77ZrD4v99H6yc/B0wrn0I4BycEhHtCCvx/xz3KCOAe0Fk+y0vvxUIg3s6cH4nSM06BfOEFoGecHJsu9UdkW4f+/74K+7ZfJUpOT9mE2rvfDDRq6cpLQFU+iMV5dCjKe6cI6wKHpQkbtPVCQNq27YReZAzgHBOTDdRVBeAMhFDYjGGxZcCyrMJ5/4Yd6fkb5TWd0AkrKCg7VXl+CoQtq4Cw4fIRFHu9SWG9+6L+jk/XvcYw4QAnFIzZIOCglLqhoScgSwDnzBGMGYdtM1i2BXuZ1/8NUgaBjnBkHvo5bwE4j+tKST7fBGUSf/PMd6UiBOAVirCjQFA
8RLssP95sTwfhCVxWRFusFknsae/Zj5n3fBRs/2BPTzJRx+r3vBXq6SdlUb3+ujyxB4tf/x7aP7s9u0xdocQZzuAAd0OTjfG7isB5PnLuisNZl33EYdBecSHoeWcOrkuKD0rrJ7fC+NzXEqUlGw5F/b1vBdavHbqcKHrFryp9FFflXNKeR9HeOUX1BI7625mE0hn45ZxBkSRM1GtOWGjOYXPAtGy0WrozIEuIv+jfqF7BSe2X1MpeuiyskDTstfASLgeD7lPfGynkZhvn+14wOqI9lhthv/ETFhp6kODbiQzCAJCA8NvzJg78yd2Zr26vAwSApjh9DlmS/MSMMZiWBYsxcAaAEuc5zLj7HRQsJ18KL9C5YZ+9vkge0XXKIABzMFAQ5PX4KLqwHkQ8Q7PHE4Dv3fkY9swuCAF4pSDsKBAUD9Euq0Hn40MIwGVGtMfqMMiW1qO7MPPej4HPLwzMSz5mA9a8/+2gh2Yn7Pn12PEoFr7xPRi3/nrkvDofZUDuLrgj4a25ld3HLjnqMCiv/B1I55wRnSat0PPok2h//Arw3QlCca+aRP19/xfkmCNSlZWUqnwgr+TzKOr7pgjewHH7e8NE8+B2zkEJgaYqflhoTgBmc7R1Hbph+YKx50GcR/270qYqYbwELV6VNll1wu6rMMsJe5YXYbtqIeyZL8H+QO/2ZNu87dz/25so5uHZ0BeQGQPAUddU1DQNskzdML4UjHMYhgmbMWcLpWDMiVAyjp5BKcQ57rhOZC6o5iXU5iYA264AXHDBvoR5CjoC8H07H8PuJAIwUNyPVMFwCDsKBMVCtMlqIATg6iDaZDWIs6O1/RHMXPRx8KXWwHzUZz0dq//6TSCammX1wOfmMX/FN9H+2W2j5+V+QJfT08fxTOborLs1CuSYI6D+8f8HetLx4fvT5q8baF/+ZbA77hmcVlNR+4e/AH3qcenKSkCVPpKrci7DnEfR3zPL7Q2cVCAOegFzboMQqWv/VL2GWl11PG/cgdy5pTYsy4IkSe5gbLbifbEtOxhakfZYdZLeZ8LLuxoIG5YfYcP8cfoCyQTfsO1dYaVDBOAwCCFOXwJwJp/VNCiyNx7EwDmBYZkwLNsVf8dDKQS6nARgj6zzzM37lTAQXgKP7RLmKRAC8IpG2FEgKBaiTVYHZ+apEIDLjmiT1SDKjua92zD74cvAdWNgHlNveDkar/ztrKuG5v/+AIvfuCZRHaLgACgvq+gbDfHOyQvXmhL67DOhvuFlIKun+8sYIV/ja9+D9e0fDU6oKKj9zZtAcwoZ7lGFj+UqnEOQJOdT9PfMuG0yzIBt9H4OxvypJG60Zycs9GSjDkWWHM9fxmGaFprtNpibRZbifbEtmw7v6lStrZaFrO8pIfKXF9EGq4ewafb0hoUOCneD+hvd/45OF9zmBHd2opBIkgRVkdHQVMhyJzS0zRlMywa3ORhjzrfOiEtRxJH5Or053adShp6vvctfFFlQZYz5eRHKQXOK3JW1B3RZxHpBB08AvmfnY9grBOCVh7ClQFAsRJusBkIArg6iTVaDXjsad23G7Ac/mejY1X/7Fmjnn5VpffRb7sL8578Btncm1fEdTdRbq6raeDpw6lNVZCiv/B3IL/nN8PxTfmzat9wF/bIvJkqrveOPIZ379FTlJKFKH8xlPpdhB53K8o5ZbiF4mPWCg4OojmcvB+MUlBIw20ZN0zBR10CJ67HDCVqtFgzb7hvkTVrHclhxdMIGqEXY6PGR930mvISrgbBhuRi0VruwZzYwzkPDOHv4+4gjyHFGevaRrn6Gvw+d92AndDQBBwXnNiRKUFcV1GoaFIk6aQiBzWx3nWA71SS0QRRZ9OzLE4CUoVd0nuvTZi1+dtWVcj9STZaU6l4Qz7vc4BwwLSEAr1iELQWCYiHaZHVgVXPHW6GINlkNgnY07rkfsx/6FGBasccQTcWaD7wNytOemlk97N37MfepL8K8b1uKozk4JyBkZYi+oXAAxLsOwx9Ojj8a2l++AWTDof370q4L/ODDaP3zvwOLzYFptb/7M0jPOCVVOUmpyodzWc8jbCA36lzK9n5ZDpt4nhFe+Um8gbtt4HrcMOe5SUhH5GrUVNQ01UnHbFicY6mlw7KYu6YfQGi4TaPWGKw6ST2U+oRE3v/W4l7CFY53H4VenwIgROFqIGxYXAYJwEGEHdPjLRXhwYLXveud3nkqh3kBeyIvDz68ncy7JpHxwP9xUFDCUFNVTDbqoNSRiAEOxjl004K7jDA4ccaRCGEgICCuZ7H3kkj8Hi6D6OeKnbQEa/SGTR7IJG9/4oFrV+6tD50dpbgXcsxT4MA5YFhOCOg9SQVg58CidEkFoyJsKRAUB9Eeq4MQgKuBaJPVwLOjuWU7Zi76+GDxd9UU1n34HZCOPzqzOjSvvBaL//NdcMMc6jjOOSghhRkMLhJesLVhUV770lBv4NQi8O79aH/4MvCZg/HpZBn1974V9JRNqcoZhqp8QFfhPKoiAAPL7wk8aHtUGl+wDYaU5wyqLOY6x70AACAASURBVKGmaVAVxd1MoBsmWm0dNveCOXbWDQY6QnT5rLd8EJLmSb2yKfL9JUThaiBsWH6EDdPRGxo6bF/UtqjQ0HHHMcZAKQXnDJQAmqKiXtecJSlc4ZhxDmYzmLYFO5CVIwb2Tm6LpyyCH3Enh1GareCZR4jiPMNKE/DAxGYGZBhJsEwirXie5Y9uWtg8zBrAHmX8aBX0I+woEBQL0SarAfPjlYqOTJkR7bE6mNt2YuYf/nXgervS4Ydg7T++G/TQtZmUaz38BOY+cQWshx4f/uAV7OybFAKApRCCyfFHQ3vbG0EOW9e9Pa0IfHAO7Us+C/7YrviEqoLaB94GuvGYVOUMQ5k/pPNcX2q56PUgLTN526V3oC2tEBw2aOs77XiiLueoa443sESJGxaao902oJumH5rRGbzlvteQYHS8u6hK7TwJ/hr3Acp2V3VVXwj8hSTpPdVry75tglKx0p6no8AR3o/wJLgo4Te4zY/6DHR56fblGVjr1fMirtdqmKipkCUJnDvRRxhzRGDLdvobjvcw/EKSeACXRfSjgOPlXJL65pF3t/ibLWW6ruK5NR6EALzCEXYUCIqFaJPVoDPQKNYCLjOiPVYD66HHceBvPwLe1mPTyRuPwdoPvR1k1VQm5S588Uo0v/XDoY7hHKCkfIPBy00n2OsQKDLU1/8epBed38lnlA/QVhvtj/w72LadscnIRB3ah98BeuTh6ctKSFk/qIMDXGU9hzAGCZplYTlsMsgrJw5nLWDSJQL7kwzcNAQcE/UaNE0F3H2WzdDWDeimAc4JKKVCAM6Q3ruoSm09KVW5m1aqmF9kRrm3qLBjJRDtMRm9fQMgftmJvrWAecAbmPT3TTwPYEIIbNe9l1KA2TYIAFVVMDXVgOzbyxF/LYvDsm3YzHYm2rh1irNrmQQ6KWPxN6+1f7P9Jgl+sbLcImqU6T7IM19BN0IAXuEIOwoExUO0yyrAwDkRAnAFEO2x3LADs9j/zkvAZudi08knHo+1F78TpKZlUu
bshy6D9fBwXr+pRExBNym8punpJ0H7yzcAUxP+tlE+RNuXfhbsnvvjEx2yBo1L3gVMZzPZII6qfFRX4TySrGNbFpbLHmm8gbvW/nNDDtpuSEUaGFTlABRJQqOmQFUUcPeBYhgW2roOiznxBgT54w+2LnM98qAaT4BuwkSJ4F9VeH6XjZHus0C4fCc6vvDyrgKiHcbjePR6a7KSvn1df8N2hV+3bXDHczgsz+B/CQg4Z27+bp+QcHDCMaHWUNcUKLK3zznOsm2YNgPjng3DW3dZvF49JEJSLeUThXeN8wopDWQsABPm90kzD69dIqFWPJfGhxCABf6LTjQ8gaAYiOdrdXBCB4pna9kRbbKc8LaOA+++FNYjT8amkzceg7WXvhukXhu5TGvbTsx86DLwxaXkBwXXpRSMjBMWergnL1k7DfXdbwI9rnvd51R9Y9NE+8OXgz34UHyZR21A/eJ3AvXRJx0Moip9/KqcR5VYTpuk8QqOO8YXgjmHqqpo1DXIlIC73jutto6WbsJiHJCcAUbiehcL8qfoonBYaGcP0Yt0EJ7C+TDu+0vYsToIG4bjTQoDXM9gABI6oZzD0nf9zbz3QUfwG7i+MADOGBp1FRO1GiSJgnIGTpxPRdNivhDs1cqJZELB0QkxnSW5hH7m2XvpAvmt/ZuV+Buco+ytBy2EWvEMGhccgGFY2LzzCew5OCcE4JWKsKVAUCxEm6wOQgCuBqJNlpOZ930cxt1bY9PIxx6BtR/5O5CJ+sjlta69CfOXfzlxerHEb864gzfDfFiqb341pN98lv936o/Slo7WB/4N/NH4yQf0xONR+9Db05UxBFX6uK7SuVSFIonASd7XfeEZA3975+KEbATqmoaGpvlzdBjnWFhqwbSckIxiEvX4KaP4JHqR/Wt7e/8ujxWLy3LdX2G2K1O7FHQQdusnGBoaCfoaXdsGeASHbfNCRXO3/6HIChp1DaoiewkBQmDZFgzDcia7up7EgDOXOOs+SZkEYI9y5Mvd9X/LUNf88swzX0E3HI4H8JYdT2D3sAIwIAZEq4Kwo0BQLESbrA6cQ4QLrACiTZaP+f/4KppX/zQ2jfyUI7H2kneDBEL/pi7v8i+jde1NI+cjyB4OPlSIMel550J9yx/4f6f+MJ1fROsDnwTftTe+vN84F9qfvyZdGUNSlY/sqpxH2cnD2yJtPZJsC0vDA//2CJ6PbdtQZAmNmgZVlkEdlw0YpoVWS4dp230TTYQoPH78qx0UF5elJkLsHRYhIg5P0e+x3vYorFl8gveUF/YbEe/FlUbfmr8926O28Z5/8Ih0QXwPY0Kc8MCEQ1UUTNQ0qIrk9y8YOCyLwbRtMDcSSdG9f4l7yjTjugb7onn1v7LK07O7lOMqcUU+/7zzFMTTCQG9KATglY6wp0BQHER7rAbcXb9FUH5EmywP+h13Y/biT8emoetW45BPXgSyarQ1WPliE7P/+GmYW7YnPcJZ/2mkUgXD4gRgS+5zTc84Gdpf/wmgKt35DPmxymcOovX3HwPmF2LTaW/7I0jPPnOovNNSpQ/uKp1LVRinTXoH+tJ4AwPdHsBh+XPOwZgNTVUwWa+7g5fOubZ1A01dB2OdwV1CKIov0VSb3rtwHPelsHg2xFlqJT7zi3Zf9QmFA+hLI7y+C0nS+6xIk23GTdcawSH7Io9jAU/ibl2961jPA9j7N9CJksBsBq2mYqKmQZEl5/oTDnDAthls24Zt2wAcIZRlYJRMPYkDa91mLf4ih3yDZC3+EsJBC17XceS7Et/nywoHdNvCvTsew96ZJQw9B0EYTCAQCPJBPF8FAoFgeOxd+3Dwo5+LTUM0FWs+/M6RxV82exAH/uafkom//oCXEH+XA/eTO/GEHHb3VrTf/wlgfrE7nyEngpC1q1F/318MXOdX/3//A75r31B5p0VMZhFkTdyg6Djp7TsnHRCkAUGiazDR/ZtTAirJMC2O2bkFtNpteE8VTVUwPTkJTdNWzCB4GWCcg7ke3tz9e9x36PK3iHLCI36AeH8VBU6S3999thQ2LDUr2Y5xfYrYvgYJ/NClnXflS6nk96cooU7fhAOMAaAUbdPEgYUFzC4swjBMP2tZotBUBZqqQKIEPGKdYkExCPQ4UYaeghB/q8nQHsAeK/HhX0WEHQWCYiHaZDXorB+TY6wXQe6I9lgO9r31ItiP745Ns+bD74D69FNGKsfetQ8z7/0Y2IHZgWk5ISDi/ikUicNCr1+L2nvfCnL4en9Tmo9WtnU72h+6LDYNOepw1P/p3YCixKbLiip9fFfpXKrCcttk2LDQoaGge/Z5MMYgUYKGpkLTVN+TgzGGpZaOlmGDkOB6p55ATgLbBEUg7XrC4o2+/AQtVso2FROUZCXeX6W3Z4nI8/7ybLcSLBj03AX6l4KISg8E+xzelmBfYcBx3FlgTFVkNGoqFEX2+yGcc9i2Dd22QCABnLgTNpj73cO78k0laA9B8DFX1rV/s8qbg/niP82hhZRJqBXP+PHDOYdp2bhn5xPYM7MgRqYFAoFAIBAIBOVk7rIvDhR/p9/+xpHFX2vHozjwrkuTib/glRd/Pc+qOVPH067/Kk67/quYM/XlrlYsFASJ/MH2zaB90SfAdj7mb0ozGYSevAnaX/1RbBr++G7oX7xq6LzTUqVJLVU6l6qw3DbxBhmDg0xh23zcAVXPGzhsaMr3zKEUNuNYaDYxt7gEw+qsAzw1UceaqQZkSep4EHMAkpTLeQpGo8tDOPATFJ+gV6nv8V0mGwYeMuXwA8uXoNc+K5MdBV14dutrk8tdsRzoeO5Sv18RF5I42AfxnYL9JLwvTeRxxIkm1dINHFxYwuzBBRim5fdDZEXGRK0OTZVAwMBs0xWI++uTN14/KO/y8hQ/s8q7EwusPEKtoLoIAVggEAgEAoEgBNGxLjbtm+9E67qbY9PUX3Q+6i84byRbmvduw8zf/wv44tLgxDyfj7xx4a2LaXOOJcuExZ0Z7geMNg4YbQDAgmXgifYiDGZjj97EgtHGvNHGw815AIDB7OWp/ACcGekJbbPYhP6hT8H+1ZbO8SkGJqXzzoT8kufHprF/fAvsO+4eKl+BgxgoFmRFlxdPz3ZvTWBKKaikwGTA3Pwi5peW/GemIlNMT9bRcNfq4wC4nf8AqGA0wgTFYBjp3nDEguLRa8OgEBW0ZZHgPf8uWv3GRa/oFRSDWYVFxLxZ7udXMGR02DO1CgTv3TRLURDSHxY66livDwIAsiyDExkmA2YXmtg/O4emYfpetxQE9ZqKiXodFADn3d9kUf3mPPopErL3/B1Hvz8Tz1/3+UXc/5UF4f1bbUQIaIGwpUBQIER7rA7OR6uYZ1UFRLssHmz2IPb9+UXgzXZkGvnoDVj3bxeBqE543TR2NH69BbPv/7dkiWNC+42LOVPHvGXg6Hr3Wsde1XRmw+YcDUmGwWzMWwYmJAV1ScaT7SUQABtqE2jZFpq2hVWyCoVSNG0LMiFQqeSXM61o0JmNx1oLmDcNnDS5Bg1ZwZypY840cExjCjZnkEi5n4PaX/4h6Hln+n+n+
Yhtf/BTYPfviE5Q01D/1/eCrFudpoqpqdIHeZXOpQoUxR5Dh4WOEByC3ixeCEVncI+jrmmoaaqzDh8A22Zo6QYMw0TvqnxFuS6C4RG2Kze+9YJedmOug/iaSEeYnUR77FC2+6rLcqRM8lh4CGV/6a8h+hteX6N7t/dHt2dxXFmEEDBmQ1EUTNRrUGUZUuCzy7Y5LNuCzZiTe0i7ybotUd7JNw8BmNLsvivjvLfTQQEwJ1/C/QgzeVCW8M/iWb18iBDQgj5EgxQIioNoj9XBWwNOIBBkz+y/fC5W/CWaijUX/ZUv/qbBeuBhHLzkM4MTUve5XYDH94t/8X2cf+M38c0nHgQAtJmNR5oLWLJMAMCSZaJtWwCcsKd1SfZF3SNqE9hQmwAA1CUZ69QaFPcjuxFIdzAQ6lmjEjY2pnF0fQp1ScaCZWBa0XB0fRIAYDKGR5rz0AvpFcyRZE6A/ukvgd15b+eoFGEKtXe/CWT1dHSCto72x/8rcX5ZUfZwi2Wue9Upim3iQiuGwTkHetoFA0CoNyBrg1AOxhgYAJtQLLbamJ1fhG4YAABZophq1LBm1SQURe4qtyjXRRBP2LMx+OwvVdhhQUdsQcC2aW05RLJBXpji7klG2LUMCwFeufYYczpl9aj1vIJ72+NA+xXkRMP6DmFhoeOO80Rk5zesN7DzC4qgnFIYto3ZuQXMzs2hpevgnAEckCQCTVVQ01QokgTOWNezD+i/tFm0pbzW/fX6UUVt6526efXLp55ZXttxhOsWFIPUArC4OQQCgSAfxPO1QpBidk4FgjLT/O6PYd73QGya6Xe8EdKG9anLsJ/Yg5kPfAJcN+ITUgLYy9/OOecwmI0JyREbVikqAEegPaYxhUnZEcLXqjWsVWsAAJlQTEgKpATvHJMx7Gk3AQDTsoppRfP3EUKwXquDEIIJScGedtN/j9UkGcc2VkFy1fFHmvMFWiuY+Nr9IPR/vQJswD0XW9JkA7V3/UlsGr7jEZhfvyZ1GSuR3gE1QbEoo028AVwauLco4IT3JwSESACnoJRCIgSSewwHMN9s4+DCIgzTdvMimJ6oY9VEHRKl4KxzPcp4bVYSwwxeF3kwWuAQZssihPgWX/yj02s/0RaLTdiztastllzQ94TgqH1dfwd+nf2hSnDIUR0RWAJxwi1TCpsTzC22MDO3hMVmC163gwBQZAkTNQ2KJEPyBGhK4MU9yd4btvhkf66O9y+hzkTrPF4uZbJNmeq6EkgdAhoQL9cqIWxZffKwsXig54dok9WAgQNcBNsoO6I9Fgf78d3Y99aLYtPUXvBsrH77H4fuS2JLtn8WB951KdjsXGw6TimIzZZ9BM/mDI+3FnFkbRIypbhjdjeOa0xjvVaHyZjvxZsGizHIlGLO1Lu8hXuZNw1okgQtYn8QndnQqIS9uiMoH6o1UtcvE7j7fwn6NNoH3gZ60vFd24bpC5lXXTdQ5K1d+m7Q449OnGdWVKVPV5XzqBJFs0mSMI1p1p7s5MGhyQpqNQ2qTP182rqJtm7AYsx5bRTsugiypWj3vWA4ekNGD2tN8eWw/NAKtcHu4MAr6/7qFkjLRVDMjpu0yMO2df0dHnY6CGMssDyFexQhqGuq4wEsS/AWKGNuaFjmRkNiIfVLe72lQF3LtKZsVmv/AgCl+Q0PFPXcx5mvIBmZhoAWxhQIisc4w1NFlVXWn0CQNXS5lSGBoGLMfvLzsfvpurWY/rPXps6fLyxi5j0fHSj+glJQtrzirxfOmXPg2MYqyK7Qe+LkWl/0nbcMLFgDvJgjWLAMHDCcMNvTihYp/gLOZJeWG1oacETpJ1qLoWk9kfhQrYEpWfXPZU+7CYv1rpg5BghAQJCkG2B87HPgew90bRumD6G8/EUDxV39M19JlFfWVKUvVJXzqBJFs0l4WMXuUNGeF3CUgBDhp+PmQaFbFuYXl7DUbINzJ21dUzA9NYF6TQXghJEu2rURZIf4xiw3vR6JLMaeScI9C8aPFyY6+CtTmwy7n1bi/eWH/WbM/5VlHM/zCO4K2cx5Xz+EAH6fw1s3tpMm2bq9XhnEjRPNQGETYKmtY3ZhEfNLTTDmTG4jlEJVZGiKAom6HsTOAy8y/4G4hgpbeiMr8sg3qzy7PKhHzi2cMom/guIh3JIEAESjF5SfUTt/HM4AdvC3nMKxaJPVQViy/Ij2WAza198K6/6dsWnW/v2fgdS02DRR8GYbM+//N9h79senowRgbNkGYDjn0JmNpm2hJsm+8OsxraggcMI2O+v4OoJrMyDQxrHorhcsE4rDasm8c6dlFatcMRcAJEJxRG3CX284irobsnpCVjCtqP67d3d7aeCxWcL99bfircqXWmj/838A7fRhrLW3vRFcliP388d3wfr2j1LnPypFHkgTlJuy3ltJB/P61qYjBEu6jpmD82jrhjPoCo7JmobVq6ZQUxUQ8FT9xKIPegu6EROPiw1zf4PoXX/WW8tUUB6i1hAWFIPEbbFkjh29E8yC2yPTI2yyWWdaQNxkNgCgxIlExzmHbTMsNds4cHABBxeaMCzm9kkAVZJR0xQoMnWWxmHpriGFEJgAMfbnIcbPisfI7VMYVSDInlG9WJfbk3Y5PG8zmbmVcupu3tckWZXC5h4Xr/O7chG2EAhGgS02Mfe5r8emmfj934LcE563l7j3xOwHPglrx6PxFaEUhC1Pe+acY6/ehMU5FEKxXqtHplWphP1GCwBQcwVgkzHMDlh/d6/eBHPfQZ44mwRCCOyedxchBCqVEnsg19wQ04QQrFY0P78Zo415M50X8/B01sKK5Mk90D/R74metD9DDj8E2uteFpvG+Nr3wHftTZRfHhRx8GxYijwQuNIoqg3CvFQ8z5yudIgbjO0WY4N5cnBnzWBKMb/UxPziEkzLBgcgU4KpiTqmJutQZMn3uvHyi4P3pBX3enEYxgZlEC1WCnEjCIOsImxYLqLGWVhIe+xKm/MwS5LsV+TdxXnnF5us2269XsJFwPMG7vP+DfQbep9FznYEfhHewIE8nfPlIASgYL4HMqEUjHO0DRMzB+cx7wrBzJXcZUmCqihQVQop4biq5zVM/HlvJJPrnYftIsdaM6ov4PUXR84ulCy1t7zbhNAJiwgREzQEHVZqI11u8VN8/I3DBhj4K5p22lV/738pzlFQBJYhpKkgc1bqO7IoLHzpW+ALS5H75WOOwNQbfz91/vOf/jLMbTviE1EJWI4QxXAGptrMxqSkQKF04LpmOrPRkJSubdOKiinZ2TZnGpgzdWyePwCd2X6o50PUOlYpal9+SdjVXvLFYw9KCBqSjH16a6i8apKMCbeuU7IKmzvXvWmZmDP1XPtKBGRgF4Ddcz/Mr17dtz1pP07+7eeAnPCU2DT6v391YD55UqX+aJXOpYz0CqxFtEevZ06YOBzc5nn+EaAvvKOfHsQfCKSSDMNiOLi4hIWFJdjM6airkozpyQamJuoAt+EJ0GEibxJW8vdkERi1ryjGBZYHgmjPmKHX/hXjPIUmro2GDQf5wnDgf5FrAOTMSvgS9b1IMxjM6hWEi7Ls
gteP8MTgIBzh/REvLHTw+M6h3P9y6eyjAEhXv6W3X9PUDczMLWB2vgXdsMDhfLfJVEJNVaDJCgjrRCkJE04JHBHY6wsF650F4xh/ya4MDkLyGUguSzhtQVEh7oQQgaACCLG1WmSvxZJUP87DfuO+r4gTm5L31i/JeSZrG4JxIK6zQJAWc8ejaP3gxtg00+/8k8T59X7s6Hfeg9Z1N8UfJFGA2YnLyAqD2XikOQ/A8chtyMqAIxyctXU5ltxwzh4ycbr+MiV45S+vw+/c+l1cvWunH755kLAcx2pF6xOAASccdJy38iAUSrFGrQFwhGHGnRDYnHPMmTpszvBYawGb5w8MyCk5CaJBw/ruT2D/anPqMmp/8brY/ez+nWC/uCd1/llQpT5Clc6lChTRHlGDYaFr7QUHY5FsYN5fH9hmmJlfwFJLhxdQQlNkrJmexmRd6woLPex1Cvbtg/8VfX6BoBiIdlg+QkVhCFtmzTjeVUUbC4vrd4SGd+5LF/yrWwSOyzMoQJumidmFJczMLaKpG77rgkSBek2FJsuQKXEmyAauV1hI6zzIKv/8xc7yiKlC+F2ZJI/tFkNWbv6CfCmajYpWn2Epcv2LXLdEhLyPGE8/XyXb91uyeiQxARngGcpDygp0uRLUItmJD6prZ7bh4JPyPBcEHbwwgABzDSjmXpUV0d9ZHuY++9+x+xsvfQGUTcemypvNzmHu4/8Vn4hSEHu8a/4uWSZqkgQO4NjGqqGPlwjBtKJhn94Ccb1wg0xICrx4XXUqQwnxXhuWQZ7DJmPYp7dwRH0idRmUEF8MBpxH6qJl4rdvuxqcEHzjzBfh1FXrUucfhBC3PxXzTjM++UXUPvK3IIcf0rXde07EvQ/JhkOhvOJFMK+8LjJN+0tXonH26UPWPFuq9F6v0rlUgSTtZDnoDqPYfd94A8a9fwOdCTTcFQfgeQkH+g6EEtiMgYKgqRtoGyYm6xpUVQEFUNdUqLKCxbYOy7J8L2P01GlQ3T3CPHai0gqKSZjNhe3KjbBp+emdaOORZCKl+JLsQNAdPndc9EbZ6KrTmITNYHlePyHYXwiLCOKJwF117nvvd9J2tvXfdV7+hFLYAEzLhrmwhFZbRqOmQVMUUEpAKYFKZcgSYNs2LM+TGgBzy6DI/polWSt51LyzGtfxqiee4+IaFBkxCl0xxunlF5Vv2Txvk9SVcQ6G7l9xzqCf0j90OQE47f6Nkt2QXuHjuke5H+Qm+PP2DV//vGsbXm63h3SwvoIO/fM1BQJBEvTbfw1r20OR++na1Zj6w5enzv/gP/8H+GIzNg1h4xd/m7YFiVBo7vq9aViwDDQkOTKPb575W/jJ+S/H89Yf5YdYHoWWbWEuZo1hhVJsqDXQtq2Ry/JYrWiYVjQcVp8EYzZqUvrr1Uui4SjdQPujnwNMM3T3oHeh8qoXA+vXRifYNwvr2gHe6WNAvNMFK5GoMNDBcM+RaQhBcE0+f4CXcchU6ojIhGBuqYUDBxdg2c42SSKYnqhh1UQDiuyUlee3vGjf5aMMYyyC4RD2qwbMGzt0BbLsI9tVBO6EeC7iPb9cz9be0NBJl6TojULidC3608eVI8EVhKkE02KYnV/Cnpk5LLQMPyYJJYAiS9AUCYok+1Gl/EIzvhZ5kZ2wHDz/zC9BJ+ucwj/nQel1iIpDeIZPsiI+vMtIUa6jqEegDhHbh3m8Lcd5OKEYy/gQThZ+rewEZ/R34MG9zpYx3Tvx1zhpK0hfVzp2ExOM6zOsO0yUmHtVZorwTlpJ7Hvze2Dv3h+5f+3F74T69FOGzpdzjub/fh8LX/72oJTI+z1668wufPKR+/CK9cfiVUc9FTZnkMjozwmTMcxZOlbJqrOmU0yeJmNo2iamFS11eTqzMWu0cXgt3sPX5gxN23LDVI+OyRhkdwBDZ/ZIonkYSd4U0vPOhfqWP4jOI+b9yjZvR/viy6Izn5pA41PvB+q16DRjpCr9saqcR5Uosk2i3v1d23teF10eRj3bgt4njDgBGQgHFJliolaHrFBfMLYZw2JzCYblegURAjDmCtH5vqOKbBNBh0HRDYQdi8sg2/UibFlswuzZu22l2rDP0xbljMwyLg/hKC9zd2NXv4IQEroMT2eT01dgAEiMhzEHd5xxvLVwuDNGV9M0TNRVUEpBCXH1ew6b2f7kNQKAgYCiP3rKsOecZXsJyzObfAG48jihzoayiLVCAF4ZcM5hWjbu2fkk9s4uiFHo5WC5vQ6T1m/U47P6FaEeiPilOY9x2bhTDinhL/x8ivDLEk8U7P55t1d3meOoW3z+4c2gn/R2T1p+dozveRt9ZwsEgiia11wfK/5qZ52WSvwFAGvnY4PFX1nCOFrudXsfxa17H8eVux+CxRkoobAzeNgplOIQtQ5KCPbqra59jHPMGO2utKOWqVFpoPgLOGsC16iMA4Hy0+KtcUwIwaJlQrctzJvGyPkG4f7/RWNffxvsO+6OziPm2tJTN4GeExPmeWEJxnd+FF+BMVKEb5UsqMp5VIki2yRqUKvLy4aE7wvzvAn+Td04MYQSGKaFucUlLDbbsCwbhACyRLF6agqTjZqzDh9nkCTJ/UbNfwC6KGMUgmgGDbqO69tWMDxJB8yF7cpB1NqrQVacDXvONTguUkbByLMdYwzMC4ecgz27vHx77yH0C9HUjT5CuvLw/E3cNCHHdXsTE+cYtxBHWAaabR37Z+cxv9iEYVng4G7/REJNlSFTgHMGis71yDrMchZEXc90eLG2XRE4By/oMrWPMtV1pSIE4DFQto7aKJ3QcZzfcgh1SULAYAAAIABJREFUWZLVgzHZ+YsmnjXLIRgnzaeo7TE5YSGxvV9QKO79FbvNC6qF6NyOB26YWPjKd2LTrHrLa9Ll3dYxe+ln4hPJMmDZqfIflpcc9hS85sin4s+PPdWd6MKxq72Eg244ZZ3ZobO6k9B2jz1Uq3fl0bItWD1hn9e6a+suWuHhjBOVZ1uJnscKpVinjubROmfqIG5egCNAN20LmiRhX4/gPTJk8AQk47P/A37gYOT+uOui/eErYvO2rrkBmF+Mr4BAUAGK3J+LGzgc1DeIG8gLbpVkGaAELd3EzPwiFpo6mDsIO6FpWLd6FVY1GmDes5YQ2OBjWZyoDN/ZguER9iwXQXuJ9lhOBk3yrwyB2fvxrh7lJu/x6N6Qzd624L5B4aF7w0J3i8vRy1pwzh2vX0oBIqHZNrF/dgGz80vQbdYJIS1JqGkqFLlTT+86BJfNGPaciz7u0hHYy0PRr6kgP6QPfvCDH1zuSlSBsr6oo+qd5Hy8NGkfIEW9ZqPUi4OM7ZfWk1YgSEvuYW5Cfsl6VK4wPKjN+NMZl691kGUtXSAoD83v/Bj67dEelfULL0D9+c9Olffc5V+Gee8D0QkoAbFHXxM3CY8053HyqnX4jUOOwvGT0zA5g8UZ1qk1KFQCJQRLloUDRhurFBUmYzCY7Yueg7A4x6ypY0pWYXMOyX2mKoRiMiIEs8UYDMagpgilPGO2QUASH6szG/v0Vqpw0DVJhhIoRyIEk7IKmVBMyMr
Q+Q2CetPho57hlg328GOQn/vM2HxCxaNGHbAZ2P07wg9iDBwE0uknDlXnvKnKIEJVzqNqFNkuSUXgrgFXb1tYfk5iAF7YROdYy7ZhGAY4AyRZAgEgSRLqNQ2gHLbF+iIULNd1K7K9BMMhbFk8kggiXV6WwoaFJdSjM+AxGZa+VBR0nHfc9ArBWdgxLI+we6cj/Hb3QTxn4N5s4vov3jkwTgDqeBkzxtBq6zBNC6AEEpUAzwOZEhDqlNG9DFqnnLA6B/WFvMIeZ5ovYf7wIsl4fK9sIZpL94xaQTDGsWd2AUttA3KWGffGj68CZTufvOsblv84r1F0WaMInkkHecvpTbuc97B4EVSLtPfSKPdB0jKTlME5AqHyhhN3suqwl+yVIhAsC7ytY/Hr10TuJ5qKVW94eaq8zQceRvvHP49NQ2yed1RNn2Mbq/x/t2wLGpUwa+moS7Iv1k4rKqYVRyAlBFgwTTBwTEgK5kwDdUmKFFxrVMIhag0EwJyb74SkwOQMCmjos60hK2jbVqq1iKcVbah3hUYlHK41YDA7sWi8ZJnQme17LAdp2xYIIdCohHnTACUEkxmJwR0fhmjYlh2wb/4FpPPPjs4nYpBPefmLYP7kVmB+IfQ48wc3QP3d5wHTU0PUOh+8c4gbsCwTVTkPwXgJC2/Yu40x1ueZAzhPEi8qg+fJi8Dgp/fEseH8c6ndRssyMaFpqKnO+2GqVkNd4Vhqt6AbXsSK8d/Hg575om2Vi157CvuVi+D7Ofi3h7BnMUkq7CdJvxx0vQeXsR5FJNgWAYzcHnuF2bC8/H3uvNWwfd4mL2lvGu/Z4YeYdsM+M+bskyQJbZOhbS45oaA1FXVNhSIBlFBwAkjUCZVtM6c/FKz/uMizLOKp3CVDfPesTMqpaGVMmcII5x1udjnCkXDurCuQ5Bft3RfuPZjsl9TrdnBegm6GDZecxU9QPMZhp2Hz504X1v1v8Bfe/vueRalPgcMRn72fQCDopXn1T8Gb0SF8J37vhSApRbC5y74Qn4DSsYi/e/Wm/6wymI09ehMmYzho6n5o5KWQUMwyoThUq2NCckRNSoAl2wIALFgGDpp6X3+EgIBxjnVqHcRdb31Xeyn2w68myaAg2Ks3hzovjUqoScPNLyXEWQNzwRq8dq/NGSghoeKvl9cB3VlbeJUrmptsvM9a44tXAQtLsWlC31eqAvXlL4w8hlgWjO/8OKtqCnoQ/cjiURZ7hA1mRoVpDDvOW7MvuNc5d+fZCDi9R8u2Mb/UxOzCIgzTAgeBTAmmGxM4ZHoSqiyBwwbngM2528tc/v6m+E4rN+JbuzwkEVbiIgAKGxeboG3GYa/Ebw9xzyQi63H2qKgAXZ60NMSzF939jf4wxs74Vydf18fVdWbwwlE7E9ycv22bYanZdtYJbpqw3MMIAWSZQlUIVEUCIcS9r9xxNu7W1xebJaf8Ee+pfPULN7Q1ii/+juN5LoTk8lBpAbgMglEe4lYW4tkw18V9Xvf8kpTnpgWQLJzxoF9a8s5/+Vnu+3ycVFV0LnLd0pD39U76DOon2fOAI+aZFvj1Z08Cj5RKv4Irjejo5gdvtbH4rR9E7if1Ghq/96JUebeuvQnWw09EJ1BkYAxi4Zyp4xC1HphR7XiprlJUNALiKQNHm8WvQzwlq1ijaACAuiT7Yq/JGPboTbSZDYPZ2G+0Qdw0Nuc4RK0PrCchxA0dnfya2Jzh+7sfxpy7fnFSZEod72ejHZnGO696jMCsUQnrtJr/rpiUlXx6cHGvosUmjP/+dqps5ec/C5iaiNxv/fgWoBV9jcZFmJeAQJAHZbq3ogTeSHEYnS9MEkjf2YIur2DqDsCapiMEzy0swrCZ792zarKBNZOTUGUKiXNQzgFQJ2xjoNys2+ww3jxl+U5ZySQREMv23bmSCbMnY8z/ebYry5qbK5ko2+TVFqNGQYlTaNeARzVGTfMlKnwz5zy0TQ6Tr7fublg7jvq7v2/i1wqUBI/z6kL6y3AH9IPblppN7D8wh7mFJgzTBufOOsOUUigyhSIRyIQDzPbzZv7YXEdUzgLvOmb1bAvahYRM3suCcYRpzqoM8b4oF5mvAbwcN0BRO5t51MvLM+u8o/Lzy4v5OSLGoFc+CUknughAce/f5Ua8TPJ5SY9aj7KQe6iXYYRi9EcJ4OjeUMZrvNIRNsuH1g9vhH7bryL3T/7+hdDOOm3ofNliE7OXXA7oMV6m7kB6nhjMRkOSQQP3DyXE95qVAh+oGpVgu30EmuB+c/Jx1g2WCIHsrhNcl2Q8qS+Bg2Ov3sRfb7sdjxlLeOb0elBCYDIGk9mQKQXjHdFZIgQ2d9YDbtoWFEqxaJng4JDdfzdtCzVJhsUYZk0dX3p0Ky7e8UvcszSLl6w/FpQkDxMsEYq6JIemZ5zj8dYi1mmDhWsK4osp3nV5pDmP1a5QngnE8c6Lgj/yJOSzTgNZvSoyTV+WhACSBKLIsO/eGp7IZsCqCUhPPW7ICo+PKjwbq3AOVaQMdhk0yBjmERwnEvcc7Xw3EicylcUYTMuExThUWQalBBKlqKkqJEmCZdlgnrDTlTdFkS9lGewsGIywY3EJE4DiRCdhy/KTKtQwenq6Ytwyd4LtMKpNBsM/JxF2e/f7fzsbuyejkc6v+zhve3x5nHMwSKCUwDAtGKYJw2IghEKSKAh14ppIFI5ojY5TBqXdk9WyIo+JLYTkE0I5j3PPI9/e/AXFJbgGcKncj4o80zDLemU1qzLqmGHz7Yi8Ub9eSEia6jwYRvFcjLr2ed7HRWkjw5L1dS7ic2MQUffKuM+ljNcxz7oOdy3CnoUEvTFmy3RtBYI8WfrOjyL3kZqW2vt38b+/DT6/GJ1AlnPvqjzZcsIC934sGczGvOkI021mY0+7E3ZZoxIICPYb0SGxo6hRCTV3Xd0TJlZjUlaxdX4WP3vyIfznjnux6IaYXrAMzJo6OOewOceCZUB3RWCd2WjZFiYlBTN6GxZngXng3QMGCqFYr9TR1HVs0BqwXM/hJ9pL/jm1bQtzpu6vfRmG3nMNAEfEPaaRLOy3wWzs7gldfWxjVcahoMnAgbDcvICvuSFVvuOiCu+vKpyDYHlJOijWO5gaDAdNevZz3r2eMCUUjAFt3cCBuTksNFv+gTVNxbrVU5hs1JwBVmaB+8/AYt/fov1VA/E9IxAUh1HGGPxjc6iXoJuokM5Jxo6D2wcJwf6/A2m9/kcnnZ/zUOdAKcBs2xGqQaAbJmbm5rH3wEG0TcvJmDoewbIsQ1UkyDJ1J9dm++7IWvx1rhOvkLqRHiH+lo/hFulKSHBGyrAUrZM4qpCbNcE8k+Y/qi14r6taCqJmMTu/9ANyRbtf0pD3OQTDfQk6JLkeXppUMybH+EIc1baj1DVJ2UXpHIxynZKcQ9L8u0PppMuvKNdUIMgS/c57YO/ZH7l/4mUvAJ1sDJ2v/fhuNK+5PjoBpYBpdk93zhjOOY6oh4t6JmO+4FqXZBjM8dryvH4lQmLDHjsf0vF11xnDnG3ixOl1+LsTz8YRtQlM1xxv2rX1zjVVAByqTPp/T0
sdj9t18gQIiH+ZJqSOR60Eigkq4beOOh4vPmojVCpBcut01OQqpy9JAInI4BaHTZ1wZY80FzApKVin1dA0TdjMxqSkYL1Wx369hZ/tfwJPnVyNU1ethUSSzVutSTIaPdcQTvFYskxMyEqifAbjPcvDrz3b/CDYPfeDnn5Sotz8PoeqQv3d34Tx1avD080cBPvFPaBnn56m0oKEcJ7P7H5Bekbply8HYd9gYfdVr/dM73FBD47e44kbbYYRiqZuQjdt1DUFDU1zwv5rCjRFhm5YaLZb4JwBCZ+ly4Xo/1aLMHsKOxaPMNEp6TiSsGfxibNnpP160+VSM8EwhInAUV6eQfGzV1TtuxfczxkvYogn+DvJgn2U/j5LX9mcg0rUv30IIeBEBgfHzOw8FEVGTVXQqKmQ3NDVikzBuBPxybZGH7fO+5lEBkSCKhLi+SzwyEUADqNo4tOw4mlYhyhNmVGNL01+jDFs27ZtqPIBILhWOfGf990BPiil0DQVqqpCVVTUGw00GsMPuvaV3b21b0vayQOMMTwwxLXw2HTCCZDlsTWDLprNJh595JGhjzvyqKMwNZXMCyaM0IEHOLPKO3+F3/OMMTzwwLbIsCMhheGoo47G5GRnMHmYAXYe+O84Xlvz8/N48smY9SF7kGUZmzadELov2Mka9aXb7wHQn+/BuTk8/vhj2LN7N3bt2oUDMwegt3W0Wi202y1IkgRNq6Fer6Feb+Cwww7D4Rs2YMPhG3DU0UdDVdV0dRtQL87DZ8iFtfKB7TjkOiqKgk2bNiWtbmq8c1tcXMRjjz0WWrXeR5dXXVVVsXFjp45RszcJIVhYWMDjj/fnn5agLZwZliq0Wg2qqmJycjK13QUO3rNQCAXZsPSdn8Tub7zshanyPfipL8QnYCxX8XfO1FGXZKhECt3fkOQugXc6JFTxhOSIlrOmjjVr1oCevAk4eRPIyRtBjjp8YP1lAJ78fGKqsxiMDGBQkGUZQDCI88bAv1fBCdXNH3gIeOAhfOrKr+B/tt2GF689Chef+mzUJRkKTSZchF1DmVLozMacqYfuHxriiS/RGF/5LmoJBWAPzjmk5z8biBCAAcD4wQ2oFVgALptQF0XYYJlg+SnbO3dQSOioQdne0M3cGXnt/Bvwn0PeGC1jHEstHS3dxGRNg6YqkAhBo6aipsrOPsMKlOf9g4EzZ2IOY2xZr29c2YmFC0GhESJ/8RnGDnHv/Kr0B8pOoudqhK2E5YpHVFuLmjgGOGP8UUKwE9ioa30y3+7d6TtjXhxhfRgKb6SPBPKk7jZJkvx+SrNtoF7TMFFTIFHixMojgKxKsG3m/NwJazRwXowAhIX3z4Pnn+Uzx3OKo5SD9GgnWVC2EM3ieV5Ocle+wjp3ST7ahhEBs/TMiss7CxE7SyG83W7j4osvziy/QdTrdaxfvx7r1x+KQw87DBuP34iNGzfikPXrMysj7fVpt9u45JLhr8Vln/4MVq9enarMUbn+pz/B17721aGPe/HvvgR/8AevyaFGPYJv7x5C0NZ1XHrpJUPles45z8Rf/dXbOi/jkWuWH5/9zOXYvPm+xOlXr16NT112eeT+rF6MHP2ioWEYuPfee7Bl82Zs3boFTzyRXLjuRZZlnHDCU3HSySfjjNPPwPEbNw4+yKtbxDMe6HS8krbqtO34Pe/5B5z6tKclTj/KLOYHH3wA//qxjyYuCwAOO/xwfPzj/xZZB++dyDnHtm334xMf/9eh8h+F6elpHHLIeqw/dD2OPOJIbNy0CRs3bhppkslKRHSCR8d+fDeMX2+J3F+/8ALQmLC4Ueh33A1z647I/ZxSkExDA3ezYBmO+EvDxV+gf0KMzRkOGG0cqrkT7yYbIE89DjhlE9adtAn02CNyFayXEzrZAM48FTjzVFz6B7+Li1otzG59ANrOJ2Bv2wll5+PYO38QJmM4sj4JkzG0mYU6lf01jwEnjPSiZWKdWuvKPzvvX4dB7zf+6JNgP78L9NlnDpfxRB3Sc8+BfcMdobvZlu3gT+4BOeKw4fIdM2UT6gSCcRPVJ42aFO3HHYicNO1+LzCGucUlKIqCRr2GmiKBUorJiTpqNY6lZguGaQIgIJTCZm5croD4W4b2K7xLq4eYeFNewiZoCBuWhJhIk8KG5cOzYe+ELn98LsKTOKrfAQScyFwRmIQeF37vRN1TzWYLS0tLqGkaJifqkCUKZjNQN0Q0ZQyMczhLYEidSHqkr6icKZZDo0CQhkwE4CgBIOqjIW5mWFblZ5XPMDPWMquHU+LAvNmYn0GtVguPPvooHn300a7t09PTOPmUU3DWWWfj9NPPQL1ej8ih+gxzD9x0042pyrj157fgla98FWhCLxiPuPDhocMHIQPiLMUg+R133I5f/fpXOP30M7zCfUE5prKBDsZwHc60HdSf//yWocTf5eLBBx/ALTffjNtvvw3NZnPwAQmwLAtbt27B1q1bcNWV38KGDRtw3nnn47zzz8e6dYf46Xqv7MCB75gQNVnyX/91Bf7pI/8MRclmYD/zwSQe3uktwozoubk5zM3NYceO7V3bDz/8cJxxxtNx1tln46STToYkRYtXAkEWLP3whtj9ky+/MFW+izFelABAbDs3MdXmDJOSMrCNL1gGOIDVrleqPNFA44yTwE47CcrJJ4BUWPAdRL1eR/3MM4Az3T6EzbDhkcdh378DdNtDoJsfgD43BwoCmVLsbi+BgOCwWgO7rSUohGJClvvCRx8w2piQZNTiQmsnZNAYhPH1a1AbVgAGIF/4nEgBGADM62+H+rqXDp3vuCnCuy4LqnIeVaGs9oiLdhXlDQz0iLF+aMb+gdXgNipJsGyGhcVF6IqMuqZBUWQoEsHqqQYMy0ZLN6AbJkCoH/axr7ySUdZ7Q9CPEIPLSZIxMWHPAtBrJ38ArjeZmGhTVnr7Eb3P1Mh+R483MAC/j8A7mwLvWy9FhzhxuRPFhIBSGS3DQtuchypLqNdqqKkUlAASpZAAMELAOWAz1ic8B/sruXm8AgDh4wtNWWBE2y8vcpIO8jDhkoMNL2pmyXKQhwdw8NoN0zntr0qvGBd2fNJt42dubg633Xorbrv1VsiyjNNPPwPPf8ELcNppp2f+cMj6g673JTguHn744dQemzMzM9iyZTNOPfVp6Jth5cQBi7wznMgVvPPvAWT18f/lL30Rl1z6ET/cbPC9GTcIkoakInUw/8XFRXzlK/89dFmeZ25fXUM6TJH1QDJb3HPP3fj2VVdh+/YHh63m0OzatQvf/Ob/4qqrrsR5552Pl7z0ZTj00EO7PJEHTe5J86GQZoIBAOzZsxtXX/1dvOIVv5/q+CRk/S4L64Av1/MojN27d2P37h/i2mt/iMnJSZxzzjPxogsvxDHHHLvcVSscUTNbBcPRuv62yH3aWadBOnJ4T0dz84Mwtz8SnUCSANseOt8kHDDaUAjFKmVwmHV1YgLKiRtBTz0B5JQTQJ5yJFYPOclrxSBRkOOPgXz8McDvPA8a5zjssV3g2x4Ctu3E4Vu2wzh4EACwoTaBOdOAbBM0ZIpd7SXIhGK9V
sekrOCjD/0at8zsxvs3nolnrd2QukrOuzH6dc/3HoD901sh/eazhsqXHncU6AnHgT34UOh+64ZyCMAeZRaUglTlPKpCGe0RtbRU8FwGThwMmSzb21d1/kvACYVuWNANC5qqYKJRg0QpVFmCItdhaioWmy1YNuurQ9mubZCsvzEFy4vwSiwPaZxnhD3HQMg4YNdVH8IEwn7lJ2ko/j7ROLi9q//RPYMgmEdUdBHOubuPghIK02IwFxaxSCnqjRoamgpKiO/8JMsEFrPBGANn0Wps1vcjIfn1h8qWr6C8ZBoCOurjY9ykLXcUD+CoD7l+aE8aXimnDsuycNddv8Rdd/0SGzYcgRe+6EX4jd94XiaeebEerCUbeL/llptGOv7nt9yM0097GoDkYXXCBhQ48f4vZgIIGLyXuSynG5Det28frr7623jlq14FzrrLGrR+RVz9sxJevvGNr2Fhfj718X3lB+s46Fj0dMJ7rsdDO3fii1/6AnbuiA5hmhe2bePGG2/AzTffhOc857l45atejYmJ6BCscXbz9ufVEbn6u9/Bs859FjZs2OAVnEs5aYmbJFT059fi4iJ++tOf4Kc//QlOPfVU/PZvvxjPONPxZhMdS0EW6Lf9Cnx+MXL/xEufnyrfhW98Pz6BZecyl45xjjWKBhrTPsj0FHDhBaBPOxFTxx+dfSVWCoSAHHMEyDFHAC88DxSAtG0n2Ld/BHr3VkxIMhpu2OfDtAZM7kw04pzjzrn92DKzF1vWz4wkALvViMX85g+GFoABQH7hs2FECMCYX4R9572Qzjpt6HwFo1F2YawKVMnLs3fSfDBsamQfkRAQd/C1e1K681/GbDcfAOBghACUomVa0OcW0FBlNOp1UEqhKjLWrJpE2+JoNpuwbbsS11VQbYRXYnFJ+o6s0oSTwhMyPuWNQ6W56r32EvarLr5dGXeGjgPbORjASc+9EJMHuvs83rrE4Azcy4dIsMCxsNTCwsISJht1TDY0UOKklQiBJMnglMOyeV+Ya4EQlQXhhArAwwxGD5o1EiWqpr1xxh0qulPN3mOJs5O4L07Wez4Jzo84wltvzt2zZ7yk/dey6KLBrl1P4ktf/AK+f801eNWrX41zzw0MfEW4SvC+kBME4Myf6exsCrw84HoM8nSeg5RwAKwnZAUHcYX6zn3qebR7Yqh3D/cIOe7/+ZbhgZeku5HZNm679dZU9fX4xS9+gde89vWoaZo/88oLydFbn2HvEx7TNlstPUVtHb5/zfdxzjnn4qgjj+pvTYFLSXpEbW+OGaHUuQfc2V+O56w34EH9tJyE3As8mJ9nQcdQD2zbhht+9rN0J8WdgX4E6+LV1/0/Z70Kp36DbBFs561WC1de+U38+Ec/Su0ZmxWMMfzsZ9fjrrt+ide9/g3dbbmHwREQ8nluWZaFz3/+Crznve/zCoosO66OZMD+LCjzjNnNmzdj8+bNOPHEk/C6178eGzduikxbpvMSLC/NH90SuY+uWQ31GacOnaf1yJMw7ooJ608pkMOztWVb4Jz7omMUfG4B5Ns/AtuyHcYJx0I5aROkTceCaIM9hgXd8IPz4Fu3A1t3gG/ZAb57LwBg2+Is3nTfz/C0ybX43OnPAyUEGnHC2dckGZed/GxctWoHXnz4U7KqCaL6/3x2HvbNv4B0/tlD5ShdcDbw+W8BrXbofvP620olAFdpgLBK51JG4rxTykhUvaPEkcgIQm6S4FJBHATU+VD0P7paJkfLXEJdU1HXVEgUqMsEtakGdMNEUzdg2hwUBIwzUOp+o5DOt05ZSfotUtZ7aSVT5m+sKjGsg4LHIK/9Kjzrx07ceP0I2UY5cSRNKyg2obYk/boEBXXHPPsjmQTzibtfHCGZdt2Pkqu3MErRbBto6QY0RUZNVaC538qEECgyAWMA48QfM+2UlXZ6Q089YYOSbCNz9U72EwjywOmvu1oaA+SkHnRZDtonvcmz9OTNLo+OIOiJfaPnme+xy83+/fvwmcs/jeuuvRZ/+pY/w4YjjnS084AXhkfwzgh6SHsCrTM7iPuPcg6M9ODkoKDuYKAj5DNwOLOmHftyX0RlDM7MJPCu7eAcnDsCoPO3k5t3fzDinhd3gntvvu8+zM3Npaqvh67ruOXWW3H2M88F4ImNgRngXkJCuv8ekbZhpj7Wtm184Qufx9ve/i7nJR/U+n1tv/M86r3nabCxuTbnxOs0AN5ZUj8J8UOUEEr9vP3thMC2bVxxxRWpz4kQQFMkcN6zpoV3DmBdaYk7a41z5t8fYR8+jz76CC771Cexb9++1HXLg/n5eXz2M5fjl3feiT9905tRq9VS5RM2aJpF+OOtW7fipptuxAUXPCe23LjZxmGTJkS3rJ9t2+7H+y96Hy644Dn4wz96IxqNRl+apGGFBCsbPrcA/Y67I/fXXzC81yQALP3vAO/fnMRfxjkmBoi/HtwwgfsewOwvf40JScFkrQY85WiQk44HOfF44KlPAZnob1srnSjBN0jTMnH33D48uTCPBT188tpapYa/OP50yBmF2yacdM2M78X83vVDC8AAID/nHFjX3hi6j/3yPvDFJshkee6TKnluChFYME56B15D7z3evz5w2B3K3e/wlm5Ab7fRaNRRU50162uailpNQ7PZRsswnGebU6IzUZgFv29IZduB8DAtP8KG1aD3ueeNGwhb9tM3HlgARDssH70ibhJHFu+/vhiMYF8kvowohzdfyuVAWzfR1g3ILR1TjQZkhYK647oyJYAkgTFnnWDbtt2JcKM/J/K4V8X9LxgX3BXPOHhnDeDoxNl4A/emixOehxkYCH1IJG5Mbh1C8yR96YalzKJtHmzf/iA+8L734rWvfR2e//zng7urHXdmKHMQKoUe68w45q4nJwdnzsctY46YpptWqjrNLcyBdc3m6Xge9wtFvWniIF3/4YA/4/qOX9yRqq693HnH7XhmwBPTmTnV+bdHlgNto+axY/t23H7HbTj3Wc/uCPjoOHX7VzZ0dnm/t7XXJQg2Neb/wQMdie41Hr3zuO6HP8CTT6Zbixlw7r+ZuUU3VAlxwn0Sx3+cEoBSx+YYo1ckAAAgAElEQVQSBUApCAGoREGJHHktb77pBvznf/4nDMNIXa+8ueOO2/HEE4/jr972jk7I5YSM9swezNe/9lU8/enPwOTkZNd2QkiXN0RcnfrE6a606eoVnIcYW37JXhs33XQjtm7dgj9/61/g5JNPSXycmMEt8Gjf/MvY/Y0Lwyd0xMH2HkDrhtujE+Tg/cs5R41KoJMNkHOeDn7/DvBd/cJkGIeodeedaNnA9ofBtz8MfO+nACEwj1gP9eRNICceD3LSRpA105nWuwx4gu/8PVtRf+ARSHv2R6ZlnMNgNhZtE68+6qlQqIQTJlejbVuoSZ3AR6RRQ+u44zFpcsxv2YYJSYkN2Z2oniR+rjl/5EnwLdtBTomOnBCGfOH5kQIwALA774H0G+cOlWcRqMrgbVXOoypUaYJBkLDB1d59vkcJvImoMZ7SEgV31/xlhGKhqaPdaqOmaajVNRDO0ahrqNU0tNsG
dXVAinHA0ASIghc01cvnwV+MhY1WUr3PN3vDY5iiJTkRRDGIrEQJYsgjhrJyiPPgt2yzLX44pMRToU8Vzf2A0O7vDqtdOrr62cUUqiNgAMRmKIi81bM7fS56nnK58w/12LIURdtwHS8pc890eOPgh0eKDm8k4nhGgzmK1/l/dBueV6lv5tTWNd3kTQZ1UHInB91DOW7Za6tzPdcI27yU7STfWYaiHYTQx2O8ZPHNaiP5R70DDGtTCGZr7atAPr8VTPA0SEwoBMLo/R8QzGJnPIFWTICgMIEA4J6E3G0J9Koi+VRCwaBoEmBDPGzPy65T7Y0fDzKg3atPMIBMXOJ/D07gy87K9GezHGzMgZQTtWplr7tt9+ov9HCdWWWqJC1YKyFc65JgAHRvbasYvCAQFTjVtntpEXcCC8BXjRiIAb3Evdh/mugWZ6YZUOCAioEfGCD/nuV//svz6qJ3NmgZx0pG8SBtvCcA1yWP9MRAXRXHd2RA9RXAu9oQgO659piqV5VcG4rImb43IRuSq9aHvEMDjnGHnhJfBrbgI5aDHIzGGEqYCkIY6qDPz+x6vKj2/cguKqNdgtmUZOkU3vWvqh48FXvw5yzV9cj1MYAwEc6xZXQmasogdvQVWwtZhHiGqezzJjGK3jmvtRqZsVWXooxEW7uU4gtffZ7P+evP4W37yT53ywgZJPD0ZfwBSB9Y9Hef/A+JoYXxZiXmfOS88kIQShUAihUMi1f9HqMVk3GTuqqUMn1HVHa5N2xuwbdmg9Onnif6VytqIuHNAmzlX4Ltv3u40P7eNG7b2vTW4r7QM4GIi+brB1rTzt/xwEHAIYKFEggGmGUhBIkoLJXB6jkxlMZnJgjIHo6aNhEX09CQz19yEei0EQqGngNmqqXT9mGsA75b7Y0bG3k5vdLGjLzsGrXx20YWfhZcMO2rH9KftGWwjasHoq2cgr2dI552Z4aOtP2wlzjK0dS8pm4lEjgwB3qnWLDwiYTlo5szcQ8rqfQMANqAYOONb4JejSUCIB08uuc4F99/Te/9o7nmvKVkI451QgmfDcT0C0BVhaRJQKDQuS6VAEUT0UrkCItn5vlRhhhKRHngG77X4Q3Qt4WzEPRfd64Q89DV6U/LIBoIWLTgoiRErRp69vPDFvJhAOYfTnV0D1Q468AAAgAElEQVSWZMcxRaZiq5SvWfwdkQqQWOXpJlFdJI9QAVFBxKhcMMvWDHiF0M8kGUfiU2eXzZR2+6a6eaLIq1+H9Jy3929oyV4Q5s1qTkWmADfPLgGkLHxVJWHAmPFsXHTT2A8gHApB0ENeGdcx6LPURjCmDWgm5vsNpf5iJ9HJ457pKLdbuGevdFaP4KqOMVReyzGGGOzMW/tpaQg411NTah5IqBEikSAvKdg2nsX28SwKRQlMv1EpAdLJKAbTPRjqSyERj4EQmN9xI+IKKIV1jeLA8N2+VPtcBO3XGfi1Z9CGnUMlL8WgHdubSn2l4JtYHdXa2ytpksZ+kVAI+mS50j5q9qcI6UC7bbsIE1b394CAHYXpfu4C/Ak8dAOajenlG3TkAqYY8WP+no7qn+6sL+NYBMLnPlYhEWnZYoZxMWQKktsbEIIjhoBKCGReW180JYZBQTB5053g4CCJOARCkGeaJzHP5sAffto3Dz4yBv74c47ticMPQObHv8XE5ITrcWFCMTPqLcB7EaUCUqFwVWmHIjG8PLkdMmOm53WzqPTJTHzhfKA3CUYAFRxMNx7bQ2YBTiPHxO9u9M275zx/z/hOwUv0ta5fRClACNcmYxCu/dsFQRAgCgIopVMenalbvofdUg+gO+rSTYazbqpHt9DquviNM61/u4WGLhN6jV8VXjJlf1P377TVU9j41qgqQyZfxPbxSYxN5lCQFC1EIiEIiQJ6YhEMpVPo7UkiJApaBBJCoaoMDAJUAMzW/6r1+e2me6sb6Kb3746KW587aM/OwNpeQRt2B3YP76At3anGRm8dLxs/tzE11UXfUr+qtMTZlAnA9QoT9kp6bbdfhHpEDq+bs5qb1fqCCghoJu340gxExObR6LsxaIOAVmAIv45weMF9FjBVzJ0FHLC39/71G8CXv1hX1mT/vUBOOLxCIlSO9dsgIUIxqVT2tPUjTKjpGVttKGhA88aJCiI2XvlHYMYA+sJR9IglgZXf9TCgegvL/P7HAZuXMIlGIT66HOHxSfSFIg4NfVQq1KWrM85rWiv4yndW4/Sn7sC/rXmqjrN5UEXBw0sPhXj4Esd7k5HSTwWHCtt7lXMUHnwS6roNnnlHDl8CYff5097/axTr+E0AAbWGlIDV2K+tBaz9WVrn0WrEN/5NKYUoihAEwfX6tPKatUOfvBm41aNT69Wp5e5WOjkktJVuedaBqatLJUNm2ZIONm+WavIx6uFMb/UK1sI8wxK/yDyGEBAqgBCKoqxgbDKLbWOTGM/moahMdyImiIUpBnqTGOrvRW80hLDAAaaAcm4xqzqxC1Fe1yigPanXLhzQnritWxq0YXvhZtu0j5eCtusOgrasjJ+t3xr5yjEZDgDl+k9LbaZtugDsVchKjevX6M3qGFUSSZolugQ3ccCOSCBCBgR0B37f644LGxLQ0YjnVvAC/sPtgEuY4WoQLvgwMHPINw33Mew1g1QojKQQgsoZxuRiXXkwi3pWUBVkleqvh0AIZohR8LfXQ1JV5CzH8pExZJ9+3vU4XpTAH3J6CPNCAfz9zRAIRZgK2FLMme+RMbmIuBiquIavHcY53stnaupfbCpmAQCy0Jw3luZ86t+vJ8P9SHzhfN9xQZkojJIYzPIFZP/wN9/8kxefXUfJ2x+t7+jcVu3fpiCMkhDsdo6A6rB6p3cynV5+g66px3QXoIl0S5tMNV7vYS+7mpetrZJnjFXoLcsHhtdxqWdnzcdwJGEcKEoKRiayGBnPIFOQwEG1lekJEI9HMdSXxmBfDxLRMCgpRQashFtfILifOgc3wSKw+7YftdrpgzZsXyrZl4OJGd1D0H6N49aHMvx/jX2OUbLbLLpKmft3YqwrBXqlcW73yr9SWbzyqxYvZd0vHefcDCUT3LABBsVCAePj4xgfH8fkxDjC4TBi8TiSPT0YGhouC3VnpdPvoVqe3U4kMCQGtBvZbBYT4+MYGxtFPp9HNBpFPB5HqjeN/v7+qvIo+47r/2fgoC0WwDqRbCajv9vHUCwWke5NI9Xbi1Rvr6v4ENAAs4ZBDtsf/KkX3PePjEG97X4IH/UXil0JiRC/8gko/3SZZxLTltjCx4AQbV1UAk3AjVChpu+M1Wu3PxyFoocjzCoyEmKo8vn1/79fyOAnb6/EvHAcX/nAfkiKIQjLngCOOMhxDH/8OfBszjNPxjk2FrKYG++ByhlkVUU6FKm6TlYkpmJGtLowzkad/3H3g3DC8Fwc1j+zrnPaYag8Nkp+5wvg4TDAGayrHDoNTdr/CSnty/3pdrBR95DZABD7yAmgwwOWPMrHS9ZZwO2OdZxplN0wznPOTW9Ba71Kad3zND2BoUWpIIRAVVVP77BW0EltUIlu6bt3S5t0Sz0Y5xVD+
nYKU/VeaTVTfW9Z3/3G+Ulpp7nPbgR2sy14C8ruwVusk4W43rezJ9MMphSMaV7DqsqRzeaRyxOEQ2FEIyJCggBKCcICRTgRRzwBFCUZUrEISVHBGC/FoCalE5XdM5xrz4PtO+dXr4Dpx0tQdGtHr/QBU4vXe8S6z83TtNpjAtoH6zu20fdqt3zjOwX79Q4iZrjjNgHarsU6Js1Bk4C5bstwWCorzbq2Zl4dTgG4Es0UdP3yr+YFXqsYPlU35coXVuD6a65qLBNC0JNMoifVi/7+fuyxcCH23mdfpNN901umnh709qaR7uvDXnstwqK990Gqt7fuLP/6l//D448+Urbtxz/7BSLRaGNltTE6OoKXVq3CyhdWYP26tcjn855pQ+Ewdt55Dnbd9QPY/4ADMG+X+Q2d++knn8BNf/5TQ3nUw/d/+F++bdPqZ7lafvqj/8TmTRsbyiMcDqMnlUJvbxqzZs/GPvsuxq4fWABBEOrK769/uQlPPPZI5YQ+CIKAVEoTnoaHh7Fon32xx54LEYnUZ2hvx/uo3uskiiJSqV4ke3qQSqWQSvVixk47Ya9Fe2NgcLDRIrvy0//6/7DJcp/tPGcOvvHtf2z6ed5bvx4vrlqJl1atxObNmyDL3h5/iUQCc+bOw26774ElBxyIwSHN41FzaOOlf7tgiL9r330Xv/nvnzexBrVz/oWfwAEHHVy27We253r2znPw9W9/t+nnXr9uLVatfAGrX1yFrVu2QFG8w+wme3qwYLfdsXi//bHX3nsjGo01tSyrXngBN1x3dWkDIfjcpV/C7nvs2dTz2Pl/v/pvvPv2W+bfA4OD+N6/fr+l5zQQzjkVipcADIDf/hBw5EEVvXldmTMTwoVnQPXzvtSUKfdF5ZpIbygCzjmKTEWRqeitUjBlnEPhDGF9TWCRaJPLikwFVCAhVBaBAWBTMYd7NrwNAPju7gdqeb29DvzNtSAL5pUScg5+76O+eXFwc73egqpiXC4iIogYCNfe74pWGfr5/XzGzD9EadPEX1Qh/sYuORd0/s76wMs+uY+4Gge5LtSzdRtRvPPvnnmTRAwJy3rYXoPkThkUu3pz6f/m0DyjjQ3O8ZqeruxxtAng+vWhogiVMVMI7pTrE9B8uqX9u6EeRt+z0+sBdEd7GExXXfyM9dZ9biKOt4FYgNvIxpGelKyT5bs4tDn6xgdH71NJEiSpCEEQEKICEvEwBEHzphEjISQiITAO5PJF5AoFKCpzfqv1bxSlVOtLuVz3QEjsLCp5J9Z6TEBzadTOb3j2+3kVB7QHtT6Ljd4bAc2jmusdTJZyx65FukVyIoSAgwFuArBbRnZqE29aHzCymtk7ldJ32g3EOfc1+lfLyMgIRkZGsPbdd/DCCi3U326774GPnHkWdpm/6/SUaft2jGzfDgB44fnnAAAL91qED59xFubMnVtzfqqqNqVcbjDG8OTjj+GpJ5/A2nffqfo4WZLwzttv4Z2338KDD9yH/v5+HHXMsTjy6KPrEgwYZy2rYyvwminZrLztqKrS8PWRZRnZbBabNm7Ea6+uwd8ffADxeBzHnXAilh53Qs2iK2ON35eyLKNQKGDLls14843X8eQTjyMUCuHgQw/DB0/7UM0TJ9rxPqr3OsmyjHw+j82bNzn27TRzJvbeZ1/svc9ifGDBgmYUEwCg2O4zRVGblncul8OD99+H5597Ftu3bav6uGw2i1fXvIJX17yCO267FXPmzsPxJ56E/fZfAuIRhcBOs97tjeD2vrA/16ravOudzWbxkH69R0dGqj4uMzmJlSuex8oVz0MURey+50Ice/wJTRNoOZxt8acbrsd3//nfEIs1V2y2oiq2a93Ee7siw/0gxxwC/sgznkmUa/8C8Z++WFf25OQjQVa+Ar76dZ9E+szJFnvEE6Kty5tTFciMIVTFMyoxFaNyETOjibLthjcw4xw5VUGygjfwYf0zccm8vbBTJF4Wppnd+yiEL19k/s1ffA184xbfvARCkQ5FoHIGAmBWLImCvj5xLX3uUakAgVBTTLbDOcekIiMVCmNWNNH0vrzm/O2fp7BkL4RPW1pWJrvR188DIfvr633zT3z8I0AsVvG6deJYxg4hBBQcnOtCsGU7UC4El66pU1A29lFCAEEoW/fNml8r6IZ2CAhoJbW7BrQv3fS8T2dd/OwC1XjgVRNS2c+7yDq5yB72pfz8BCrjUJmCwrgMkQqIREOIhEIQBAJKgEQsjEQ8AkVRkS8WUZQUKIoKSilUaJPEVMa19YkJqeq6d9pErwB/vNqzm94n3UgwOaO78BubBbQ/wXfRHXu/Qou0Uvq36JfYjWaINvU2UrXH1ZN/o/VyE5879WZ84/XX8POf/AhLDjwI53/8QsRi1YXeayVrXnkZa155GQcfcig+dv7Hm+69Ww8vr34Jt/71ZmzauKHhvEZGRnDbrX/FA/fdixNOOgXHHn9C3Z6lnUI175l2f4ZyuRzuvP02PPrww/joeedjv/2XTHeRIMsynnjsUSx/9hmccuppOPHkU6a7SG3Hpo0bsWnjRjxw3zIs2G33uia8TBWqquLxRx/BPXfdgWw223B+69etxXVXX4mh4Rn4yJlnYZ/F+zWhlN2Dqqp44rFHcc9ddyDX4PVWFAWvrH4Jr6x+Cfsu3g8fOesc0wO7mYyOjODm/7sRF118SdPzbheE8z4I5bmXAK+ww2veBn/4aZClh9aX/xc/DuW7PwUmvdu81eKvlf5wFJxzZBUZHPAVb0OUeu43vIElpiKvEsQqeNP++56HOLbx5avAN54MMnMYAMDufLBi+bXzaeGs43rZDE/ezcUcBsMxiBXEbSMcsJf4CwAbCzkMRrT+YCv6CxVzTPcg/jXnc+f09nULS0Yg3/8E1LfXe2ZPZ81A6NSjoRqSCdfD8nuMWexjuHbvQ7lhrFNEAVO4NephnXZSup5MN96XQkRbrzUlBFQXgRnTJkS02sjaKX3YHYluaZOgHu1HN4k209kubmKYX3msXi5e4q6XsGxMcmNgejxoIz0A01DqXj4O6BMCBSicQc7mkaMFhEUR0UgIIVEEAUdIFCCKcSRiHJKioFCUIEkyGFfh9r2qhkCA6lyq9fYOaC+qnZxRTdqA9sRruZigPduLWiOL7sjt56g74WYEA2oPZdD0C8VJWceqUzBeBF4/L7opPMSK55bjsp/8GNu2bp3uopg8+8zTuOxnP8bIyPZpK8PIyHb8z69/id/+v183Rfy1ks1mcdutf8VlP/kR3lvvbRTsFKzPg/094/ejlIJS2jHP0sTEOK6+4nIsu/fu6S6KiVQs4va/3YLrrr4SkiRNd3HaljffeB2X/fTHuOqKy129haeTN994HT/64X/g5pv+3BTx18rWLZtx1e9+i+uvuQqZycmm5t2pvPPWW/jJf/4Af73pzw2Lv3ZeXLUSP/rh93Hnbbea4aSayXPPPmNG8ehKkgkIF5/lm0S5/lZgi7e3tu93J5WE+MWPV1WUqfoiEUKQEEMoqIpvv1MgtGwdYDf6w1GEqWaszCiVvfknFKkUQFFlYHrIZ/7mWvA1
b3keZ/DhFffi0Mduxsai8znaKZpArkKdAK3+/S4ho2XGMCoVAACzYgkz9HVzqc4ol/jmZ0ASlT3v3foxbMs25K79i+9x8U9/1HkcOBgBGAFU/d9WrOOUSmOWdsfoCxq49Qm9xrD2vqQgCBBF0cxvKq5LJ1/7gPamG+4tY73vbqDT37XtiPUdbmAY6Y19Xt8E63YvWwQAUFBtkhAh+jp51nzcv90EWr+LEK715wkBCEVRUTE6mcPW0QlMZAtQVG19Y4ESxMIh9KeSGO7vRSoeQ1ikZv6N3je12CkDpo9q7VlBG3YuwbPY2djbLGjDziZ4DjXs3x7Ra4cdr1l29v3OfVWswQH3DlA7Cz9us/sN/Mpdq3d1Lddgl/m71rS2JWMMmclJjI2NYusW93B+mzZtxM9/+iN853v/jP7+garzNvjAggVI9/XXWKYJjIyMeIY53fD++7jsJz/Gd773z+hNp2suUyOsW7sWl//PbzAxMe6ZRhRF7LHnQszeeQ7S6TR602n09KQgSUXkslls2boF769fj9deXYNczt2baf36dfjFz36MCy76JA60rXtZLXvsuRDJZLKuY6slFA7XdI+2yzMdiUSx9z771HRMsVjE+Pg4tmzejGKx4Jrmztv+BqlQxIfOOLOuci3efwnEGjy/ZUXB5MQ4tm7d6iniPf/ccmQyGXzxK18rG0RXy1TdR7VQ6TpxzlEoFDA5MYGJiQlMTk5UFN1WvbACr7y8Gp+4+JK28OR+5qknceMfb/ANa5xIJLBw0d6YMWMGUr29SPf2IRaPI5/PIZvNYuOG97F+3Tq8+cbrnuvWrnhuOd5+60187tIvY/bOO7ufJ5nAkgMPqrkOOT38tJWd58zF8IwZNefVP1D796cWXnj+Ofzh+msrru+7cK9FGB6egd50GqneXkQiEYyPj2N8bAxjo6N4+603sPbdd12PVxQF9y+7Fxs2bMAnP/XppkeyuOnGP2L+rrsine5rar7tAjl0P5DHnwdftcZ9v6JC/c31EH749fry32cPCJdeAPW3/uugT/UwYjCiCYzjchFhKji8eDnn2CrlMRzxj9YiEM28KTEJRUYR8RFOBUKwXcpjMKydmz/+HPi5HzSF4EqsnRgDFQRkZRkSU8E4L1vLNxUKg3GOEangui4w5xwbCznMiiUc+0blAvpDrYsCQwDwKmT+2FcvhrBQW0Kg0uxjt/25n10JFL0nZ4WOOADi/ntZvJCI7p3kNFCwslV0LbEsib4QTx2eRtONdbxpHyMaf5cH6gQADkK0rV5jKlEQwCh1fFtbtTZpN3kHdgOd7nnqFk2gU+sClERggvarB+FOH4aK3/8ued7b7b3lFaXQEHXdDPde5fcXXQm0BT/MnMCJdi9YMteO1ycW6ZsAEFCqrT9ckGQUZRkCoQiFRcSiUYiUgxAgHosgHotCYQySpKBQLEIyllUhROt/cGMZA802ZoaLtn/xarbLBrQb3WAXD/Cm2nYMPImnD793qPHvwDu4s6nkwNnNmHXnHODwXgPYjr3jVXWYMZfNlV6E7dAIrSpDK2cgHHPscTjoYGf4vmrYvn07Vjz3LO5fdq9DlMxMTuKq312Or3/rOwiF/NeQs3PiSadg730X11WmrVu24Lnlz+LB++9DoZAv2zc+PoarrrgcX/vGtyCKVd/GDfHiqpW47pqrIBWLrvsX7bMPDj74UCzaZx9EIpWNk5IkYdXKF/Dk44/hzTecaw/Ksozrr7kK27dtxcmnnlZzeU897UM1r2/aDs+eG5U+zLWSSqVw8ac/W9exiqLgjddfw0MP3o9XX3nFsf++Zfdg7rx5WFyHiHjRJy6uSxTinGPtu+/gyccfw9NPPem4Nq+9uga33fpXnHm205uoEvXcR62m1uvEOcf69evw8ksv4uXVqz3X65YlCVdfcTk+fMZZ0xY6m3OOu+64Hcvuuct1vyiKOPjQw7DkgAOxYLfdXUPFM1v7ZyYn8ezTT+HJJx7H1i2bHenHRkfxq8t+hk999nNYuNcix/6hoWF88pLP1FyX9evWOgTgQw8/HEcdc2zNebWSvz9wP/52y82u+0RRxCGHHYH9lizBgt12r2oSxejIiP5ufxSbNzm9yl9+6UX85pe/wOcu/VLN63T7kctmceMNv8cXvvzVtn2X14pj1uLnzoP8jf/0FM74u++D3bIMwtn1Pb/0iAOAzduh3rKscmKn+tRSekMRbC3mHQIwIQR51Xvigp3+cBSMc0hMRZGprt7DCSGEhGDp7xUl8L8uA1++qmL+Cme469DTkVcULEoNQGEMo3IRjHPEdY/mqCCCEoIwpZCY6vDizagy0pbQzwVVwaQiYygSqyh0N0o1PYvIhR+BeNSBFQ02Xv2U4s33+oZ+Jsk4Yp87Hw5501yfsPx8VnO1vRjWY9yMwu36rrCLv9rf5debwC38J6AJwd4hNikAUGqGgzbzaOHYr12vc0Bn0a33UasmYDRCPQHs2rEe9dJu763y93xlW6JXGq9QvKVtpW8Lh/69sCQ3RFjrOcqzK/2hMAalIKFQKEIUKGLRKMIhCoEKEAUCMR5GLBaCJDMUixKKkqxP6ir3dNbOq4nBdu9mP3Zko3enUItncDPyCZheKjl8Be3YHtRjew7arjPZkSbbEJDqBWCgcsfJmqZW7LMsGsmrXXEzFLVLzPKBgQGcePKpOPzIo/GH66/FSy+WG/nWrX0Xd91xO8446+wpK9PQ8DBOPe10HHn0Mfj9tVdjzSsvl+1/5+23cO/dd+L0D5/R8rI8+8zTuOG6a1zba/bOO+Occ8/DbrvvYW6rRpwMh8M46OBDcNDBh2Dz5k24+aY/uwqKd95+G0QxhONPPMkzL7e1Cb1CF3ULXu+gViOKIhbutQgL91qEVStfwA3XXevwCL7h+uvwgQW7IdnTMyVlIoRgl/m7Ypf5u+Lopcfhqit+6/Cgf+iB+7HXor2xx54Lp6RM7QQhBHPnzsPcufNw6mkfwvZt23D3XXdg+TNPu943t//tFmzZvBkXXPSJKX+G/vynP+DJxx9z3bd4/yU448yzPdeR9XoGkj09OO7Ek3DciSfhtVfX4KYb/4RtW8ujPhSLBVx9xeW49MtfxQcW7NZYJTqIu+64Dffd4x66fd/F++GMsz9aU2QNAOjr78fS447HUccsxWOPPIx777oD+Xz5JKb169bilz//Kb7xnX9s6nvi1TWv4PFHH24rkb2pz1BvEsJFZ0C96ibPJOzW+yEsXggsmFfXKehZJ4Fv2gb2ZIWQ2tPweR3SvYFHpAKSYsgUTufFUzXlQwlBmAjIKDJkxiBS6qiOyjkKqoKEvoYvu8/9vWRlVC6iLxTBrvHSxAaRUsyIlkTbcVlCRpExGIkhQgWIhCKjyIhSwVwX2BCljfFGTlUw6OIpPB2IJx+F8EdOdGw3yuoUJG0Rg95ai+Kf7/A9R/yz54Ek474Cs9dYyb7d8F7isAoahndRee+xXcdfRnkE/XKoNiHYa3xFiDPcs5mXIIByDlVfG7jVYke7iSk7Ot3UHt1Sl24RT6d4blhLadd7y82O5iUC29NYvxl
+31Fi+T6ysjz8y2HdTiwHSIoKKZMDZypisQji8ThC+ty3aIggGooBiENWVBQKBeSKEpiqgnGACgJABD3CRXO+1e36vQ8I2NHY0UPUdgrVTrgJ3qmdTbv2e5qFNqnNI5xKNe7w1o5VtRfKmr+14+QWVpnpA3O/n1esfft2r3RThdt1tV8L+37Psreo/IlEAp/9whdxyKGHOfY98vBDmBj3Dn3cKnp6evCFL30FB7iEQn7owQcwOTHR0vOvX7cON/7h966d+zPP/ii+871/wYLddm/o3poxYyd88ctfw1nnfNTVo/lvt9yM5c8+U/F+2ZGZjmuxeL/98Q/f/LbD47tYLOC+ZfdMaVkMdp4zB9/6zvcwY6eZjn133Pa3aShR+zEwOIiLPvkp/OM//xv2XLiXa5qnn3rC0wu3VTz2yMOu4m8ikcCXvvoP+MznvmCKv1z/Mc7Nn7HNjz32XIjv/NM/42CXd7wsSbjyt/+DjRuau7Z5u/LSqpWu4m8sFsPnv/QVfPrzl9Ys/loRBAFLjzse//L9H7reZ9u3b8Pvr7266WsC33brLdi8aWNT8/TC65vUyu8TXXoIyB67+qaRf/17IO8eqr8ahC9eALLvHpUTAqjOZ7S59IUiGJdLXtCFGjyArfSHoxApRZGpmFDKvaoFQlBgKrJVrBkMAFuKeaRDkYrpZkTjZtjnnKrg/UIGSTGEjcUsxuUi/r71PWQVGTlFxlYpb5azHfo6wv57IfaZj3nuN55lN4MzIQQ8m0f2p1f4n2O/vSAccYBje6mPScA5gTaEo/rf7mMo48eIuzcbY7xsLGWUt10xrqMAooXkZLxsuz0t4P5utXpPiYIAURBAXTzGmk1g5Gsvpsse0Ao6vS7W8rdDPbjHr1qs/fJOpx3aw4tK/Uwvu1qlPqojagQpXydYt16aPy25y51CStdPEARQSiGIERQlhtGxDEbHsshki1CU0r0iCgQ9yTgG0imkUz2IRyMQKAFXZUf/uhlt0w420oDG8bOJB3QOlXSNgPanXbSngPrp5vYzYxlW6kxYB8vWf1NKm2Lk88rDTzA1sDeOlzDcaS/S6Xp5UErxsQsudKwHKUsSHri/irCILUAQBFxw4Scwc9bssu1SsYgH7mtdmbLZLK783f9ClssNoOFwGJ/5/KU47oQTq15XtZKRnFKK4044Cd/67j9haHjYcfyNf7wBmzc7w4kGlDPVwvjOc+bg/Asvcmx//NFHWj45wYtkTw8+87nPIxwpN8avffcdrH7pxWkpUzsya/ZsfPErX8MJJ53suv/uO+9wRB5oFe+8/RZuudnp1TgwOIivf/u7ZQJio9+DSCSKj3/iYnzyks8gHC6/R/L5PK67+gpIkvfalN3Atq1b8cffX+fYnkr14itf/xb2WrR3086V7OnBZy/9kuskptdeXePpgVxL/lZkSeOiyZ0AACAASURBVMIN113ru360G7WKuNMpFImXng+EfILYjIxBufL/GjvHP3wKZJfZFdMB5aFppwJCiOkN/OL4VvzDi4/gz++9Vl9eAKJUgMo5FM7KajIQjjpCTttRuCayDUdiVXs9GfdOOhTB7Ki2xnxfKIIzVyzDxSvux92b34XMWcvDPdcC3XUuYt/8tG8a6zNhHYcA2ns7/8trwEd8JlLGIoh/5RNmer93ffk2qxhsCMTlaV3HQdDEYUY0r1oVvCwscrtCdGO8YOt/u4vA3PV9ZvV9ppRCFEXLeo6tq3s7X9cdlXa/32uhU+th2qAwHVOqymnW+Tnn4JbJNZ1Muz8jXv1VQ+y12mrsdkVKqWnPrJRnaZvb98Yafc347pfKURKetXSUEsiqinyhiO0TOWwbm8RkrgBF1e4ZgRLEo2H0pZLoTyXR1xM3nQRaNWHL2l/pRsP3joKfTTygs+g0/WJHx2u8FojCnU+3tB8FnKKvW0fHi0oXwNqRsM4wd1w8DnBW+tmD57gO3uv0NPHzHq5UF690dkNPK+HcfUZpM88dDodx1jnnOra/8HyFkIhV4NYpse9zq0skEsFZ5zjXMF2x4rmaBf5qJwdce/UVGNm+3VaOKL769W9i38X7lW1vlhfUznPm4Mtf/bqrUf+6q69surdYNzNVAsUBBx6EXT9Qvk6uLMtYvXr6xNadZs7C0qXHOba/sKLxZ7ibIITgI2eejY9f9EnHerqcc1x/zVWOd0CzmZycxDVXXgFFKffimzlrFr7xre9ixoydADTfo2DJgQfhk5d82vGcbNq4EXfedmuTztJ+KIqCa678nSMsc19/P77+7e86Jj81A1EUcdHFl+DIo5c69t1795147dU1ded9+ofPQCKZLNu2ft1a3Hv3nebf0+Gp21IG+yGce5pvEv7si+CPP1f/OcIhiN+7FBgeqCLx1IvABs+MbMY9W9bhl++uRoFpon+RqVBr7BP2hSIQCEVOlcu8ixXOIDH3yQQcwOZCruYyG2UblYvYonv5CoRiUbIPAJASw+itwpu4UartN9PhAQz+4GsIx6IgTPXpa7KyUJHWMY9898NQV/o/57FPnwukkq77/IwK7um9gwWVjNFama33LuOdYSw0J08CoD6qUckzq3J+AqUQBcFzMnSzrke7X9uAzqYb7q1We9ASyzujXi/fauDQbWBd0CZAZ9xbXn1bv76uVVB1czRxprfkCWfIbzdbpXkgZ+X7KQEIwBiQLxQxOpnDtrEMipIKlWmZa+sHRzDQG8NQfwqpeBQEWhQP6HZUe32a+Z3pBqN3t2O/170I2rE78GrHoH07C792Ctqw/ekUUdicF84B2pfuQU8yjp5EHIlYFPFYBJFwCJGQiJAgQCCAQLTBNQWB3dfRbQZatf8u8zoG13rjhENbssk/FJBfh64aQ6f1GKsgaRcn3cRie/01I4rTI3nKsdWhUfbYcyF2njOnbNvo6Ag2vP9e9UWC88FoxPC8cK9FDuP86MhIWcjSeutuv4deXLXSdU3eCy68CLvM37WlxvOBwUF8/tIvIRQKmdv22nsfnHveBVV7HAdoaG1ja58WaB1uazS/vHp1809UA0uPP8Fxv7zy8uq2/ThNJ4cefgTOOfc8x/ZsNou/unjmNpP77rkbY2OjZdsikSg+/flL0dPbaxrCWsHe+y7GGWeXJtaIoogjjjoGS487oSXnaweefvIJvP/e+rJtlFJ88pLPon+gGrGvPgghOOuj52KXXcvDF3PO8beb/1L3c5nq7cV5F1zo2P7Asnvx7jtvd564WyX01KNB9t7dN41yzc3A+gZCmidiEL/3eaA/XUViy5pvU8gpO83DObMW4F8XLIGgnz2jyNhS1ITZnKpgRCqYnrp+7xICICGEwLVeORjnCFMBm4s5x3EKZ5CZitkxd8HSmi6jh5GWmIq1uUkUmDbZJUoFc23fmCDiN4uOwtqTP4WTZ8yr+TrUCkeVa/wk4hC/+WlkRRGMEEQTCcSiEYhUABjTx0YEnHGAa99bZunrcs7BXn8HhWtv9j2NsHghxGMO0cpWhWGn8uRJre/jNyjVtpcLxVbPWPvYqB0hHhOGy7cBJXmHgRAOEO/xY0gUHZ5Wxv
5m0q7XdEelne/zWumWenjOYmk0W31o2MqrZL5fuqwP1kn3VrU2Gj9boTXaYdk26F9ZczvVfuZ258+uFhvHUUL0/aXg0uOZLLaPTmBsIoNcQYaichDdFpuMRzDcl8JQXwqJRByCSMG5HsXDFvWilslM1X7jOsXovSNTjY0yaMfOxs2pqmMndncJbte92ufQrjkFbdiZtOt71bifxFg4ZG7g+kxL6jLrTWUcnHGo5k1pCKMcqqpq/+Ylwc9whuCwin7UVdS17tNmSTIQaB0hN3G1dIxbqC/3v6sZwLttr3UGeKUZg9Wct9GbpNbj/V4ue++zL95bX24of/vttzBrdv0eUtVeP78yvf9euQj91ltvYuasWWV5+OVVzYv4rjtuc2w/6pilrmE8W8H8XT+Aiz75Kax4/jmcfOppmDN37pScd0fBOgGkGeyx50KIoljmxfnOW281Je966enpwS7z5+NtSzkyk5PYsmWz6VUaUOLIo4/BO++8jWeffqps+4srX8B769c7JsQ0g7GxUTzx+KOO7Rdc9AkMDQ1PSadh6XHHY3R0BABw3Aknore3GrGrM1EUBQ/cd69j++kfOQPzd/VfV7YZCIKAT37qM/jpf/2wzAN5w4b38dKqlVi8/xLf472+Wov3X4JDDjsczzz1pLmNMYY/XHctvvNP/4JINOpxZGcjfvlCyN/7OTDqEW5fkiH/+HcQ//NbIOke9zQVIEMDCP3H1yD/x2+AbSMV02sOGxxTJQXPifXgsn2OKttmrLELABEqQOW6ZyoBNhSyiFABQ5EYCkyFyhjiYqistMZavllVgco5do71lL2LikyFwhkSQgh2OIAxuQiBEKTEMCZkCRxAEiGEqIB58VI7VAov3QoISgJtRdIphP7lS1BnDEKVFBQkGQQ5RCJhJKIRRCMxSLIMWZZBBQGMcd2QS8yBPB0ZQ+7Hl1c8FR8dA8/kQJLlYa/9JsG6TcK1pyeEwlgL15neeRyHy3iKc31Mph/h4fA+HcYK45wCzFrq5SRm9Tg3vJ2tnluAIZC7lZsCIIJgjnsNAqNM99Mtbdzp9TDtUgBA7FM9asineUVqCMa5KRh2Om7fnnam0pjfbb99cpDdkOv4Dpd2mH8b97A9P7diOOyBhAKUQlIYJCkPQikiIRGRkIhwWNAnKxGExDCSsQiKsoxCUYIsK/p3q9ze1crJTPb8W3megOZif5aDduwc3NqFMebswwft1xFUqwkF7dl5tNN71XQN0wRc7y4yJRyiCIQjFJGoiGg0jEQ8hp5kAuneHvT39WKgvxeD/b0YGkhjqL8PQwNpDPb1ob+3F6lEAslYFPFIGGGBIixQiASg4Fo4NV09JiAQiFC2FkctyrnXrJdavILtM/ysv2qEQ7/w0lOJ2/ndyuNXxrnzdnHkOzE+PeuaGszbZb5j2+TERFNnO73w/HPY8P77ZdsGh4Zcw2K3kiUHHoTPfP7SQPxtIc3y5I5EIthpp5ll2yYnJ6Y9ZLfbMzxdaxN3Ah87/+MYdhHH77GE020my+6527HG+IEHH4L9lhzQkvN5cebZH8WZZ3+0q8VfAHjmqScxOlIu4s2bPx/HneD04K+WWvsVA4ODOMNlOYNld9/VUD/hrI+ei77+/rJtW7duwa1//UvdebY9yQTEb/ivy4rxDJSf/A4oNrCudV8KoR98DWT2jIpJDW2sXYZnAiHoEcMIUy3E/c6xJPp1gZgAKDAVij4G2FDIYkLRrpPEVET0Ywg0T+JRqQAAyCoyolREUQ8NnVVkrM9PQtb7vgIhiOvibn84agrS031NCK9B/O1PQ/jXL4END0AboJS8uQqFAkbGJ7B9bAxFSUI4GkUsFkU4LIJzBlXV3umirCD7g9+AT2Yrno6t24jcv/83kCvUXC+/MYa2nZb9SmsFV5cfJ5q4yox1gmsuYWsxx2woRasyjO/eGN7A5QZy699UDwlNmzS2cC1FG80ODyjRLW3SDfeX6VzQBXRTXTqRSmN9L3uAn2ddpf6Efa8xIal0mPv9IAgU3FiSgWhONXlZxXiugK0j4xjL5iGpWmQLAo5oWEQ6GcVgXw/6Uj2IRcKgBIDutGMta3APBhhUY/9qN0+2gNrw0wECOo+g/bqD6WpHUwDO5YvIZHLIZPOYnMwik8khm8sjny8gXyhAkhRIMoMsMSgyg6oaHsCltbCskU0o0TyJQyJFJCwiHosgEY8i1ZPAQH8aA/1pDA70Y2igH4MD/Rjs60F/OoG+3jhSiQgS0TBikTAi4ZAWipqWz/ys9eXVrItr9S6tJBp7dSDtP7dQ062mkiBskEqlHMdOjI/XdU43oa0ewa2ZZfLi7rucYs8pp55WFpI5oDtpRBDu6S2/NznnyExONrN4NZNK9Tq2jTf5eekmwuEwTv/Qhx3bDS/gZjI2Noqnn3yibBulFCd/0H9t04D6YIzh/mX3OLafdMoHq/q2e/3q4eBDDnOIte+9tx4vr36prvwAIBaL46KLL3GU6cnHH8Pql6ZvPfJWQ+bvDOHTTkG9jPUbofzimsZOlEpC/LevAHNmVkxKdB/g1q1g2BiCfo9EqICBcBQhqoUgHI7EENVFX4kxbJXySIohKJzhN2+txH5/vxFfX/Mk+sNRbCpkkVO1iBcRQcDsaNLMJyWGIZI2W66Ca0JmVc/sjAEI//5VYGhAe845AbGsjUuoAA4ChVHkiipGxyYwNj4BRVEQj4aRjMcRDYnI/ui34Ju2VV1Etm4jst//FVAoelfDZ/zjtc/b6FOdEFx9flOP3avJ/ME5ZnOkIeWuzMaSGabnpO5BJYoiBH1t4FbN2G6HaxlQjn2idCfTieW3P2/d0A5A+3gkN4NObRN7H96tHtZ7z/79sNr67PnZjyOEaJFBAIcHOCHeAhxTmXmu0n4VAAMIRVGSMTo+ie1jOWRyEoqSCsa1tJGQgHRPHAPpFNKpJOKxGARBML0DW/Uds9NOfYWAxnBzHApoP7yebbd2C9qyM/Fy4gvasf1xG0+7OY+2Emp2AowbSfcCUJkm8ioqg6IwSLKCQqGIQqGoicL5IrK5gvnLZPPI5grI5YvIFyQUJQWSrGphSFRrpWCZhQZQSiAIFKGQiGgkglg0ikQijp5kHL09cfT3JjHYn8LwQBrDA30Y6utFfyqJ3kQcPbEIopEIQqIIgVKtE6XVQqsP10JVg+trmVnX+NW3aX+rAFMBlzBfdkyx26OD59rpswnCtc60asXD7Taosp7XuBFV1TnPv5Y+o9HRdatvvZ3PVj8UG95/D5s2lq8ZODA4iIMOObSl5w1oT/yeW7eOlNvx00nQGaid/ZYcgJ1mOkWe5597tuG8uf5jnGPlCy+UhQwHgAMOPBjDw5W9DAP8cRNq169b6/D+nTV7NvbZd/GUGkQALRT08See7Ni+6oUVDeW7YLfdcezxzvWbb/zD76d9MkorocceCnq4v9c8f/kNqP/7p8ZOlIgh9G9fBhZ+oKrkBJq4Nt2er9UiEmp6CifFEGZFExAIgUAoBF3QpXplZseS6NNDRYv6GnbtCq+lEXadA/H7XwPp7y29E7TOrENI1
Ca7amGWVZUjl5ewdTyDscksxn99PdRX3qy5rOyd9cj+4DeeIrCbIFJJpLXjLQSXBGHn/vL8GCn9VMMz2CX6UavxGl9QSiGAgHKA6h9ed+8twPBvNo41f7QkDgi6N7BAqT6+DPpWOwLGZIDp7ssHlPrP1Tx73PJrR5hhiwreI9OOn3NCJdudVaC1isnWfBzisU0Q1mywVq9g/W/qLAfV1xmG5XwKU5ErFjE2mcXopCYGywoDByBQgmg4hHQygqE+TQwOh0RHWbltuqJ5X3JWtq3R+7WSfTMQMdoXt/vf2maMc6hBG7YtlTSIqXREC2gMv/5o8E5tb5xjUOdzaWhxzWtHI1KIZt9xnZ7v18lx21cSC1UoigJFUSBJEgqFAnL5ArK5vEUkziOTzSOT1TyMTcG4KOtiMUcpYiop+xHCIYoEkYiIaDSEeDyGtC4SD/SlMNSXxmBfCgPpNPrTvUinepBKJPD/s/edgXITV9vPjLT19uuCCwSDDRgbd9ObTQeD6RAIYNMJISThJaS9eZPwkQQChCSE0HsNvTebXkMHV1wwwaYZl1u3SZr5foykVdtdbbtbvA9RrnckzRxpRlPOc86ZpkgYYTmAoByABALCOKBqIBoDGAdnFByS2GsD7omb/d3ZLQWNiV8hFVKMF5GnQqeI8g2rd6dMvb3uULFt7R15luRGMY24xyN8bVNzU7EimVi8aKErbeY++0GSpJKV0UDtwmthaMAZWplSiuaWwvadLBW8wj03NZXue6lHEEJwwEGHuNKXLl5cVL7cNEwSfd8Sr75mv/2LKmNTR7bxdMniRa60Gfu4ydKBwi677Y6wY2/eJYsWFT1RnzX7CIwYOdKW1tvTg3vvvrOofKsd0rkn5vTOZW++D+3fTxVXUCSMwG/OBZ0109flhIjQubUMAuCibadh5f5zcOXY3Sotjm8YRqF+59hkxi6Q//BToIg5JeVA6v6noLz0dsF5sOWfo/8PVwOp/MKW50sG51pUptPd9+WSodpg6tUznc/QRowxxXgmSZIgy7K5bnKi2Oevxne3qaNe6qRav81CkLHPQvWSvl6oNXkzoV7alYFc40GmdKdncK4yrDo49/nMThdOUtmAqqroj8fR1dOLDd196IsloDFRN5QwREMyOlubMKSjBa0tzQgFg+A6cce42+OZW4y/nGNhqdAgKeoDpilBg3iqeTQIxNqHs+4a9Vh7yLZuLyZPc8bBPTpsv5ln84pzWsQZ3r+cQ3gYqyoURUEylUI8kUR/LIa+/n706uGo+/vjiMWEx3EikUIioSCV0nSiOG09KSZJwlpOlkTo6XAogGgkhOamCDraBUk8eFC7OAZ3oKO9Be1tzWhtiSAaCSEcDCAoi3DT4Ex4BVvfB+wTMeO5rJutO70B/StX/IWeLAWcHYF1YufE56s+c6W1tblDypZCJr/tbdVnbpkGDRpcMlm8QmVOmDipZPk3UPvw+jYTiTi++fpr23VtbW0l+24LxSqPb7iU30u9YocJE1yL8jVrVucdbt6w8jejTujpqVQKy5d9aru2o7MTIzffvAipaxN+xr5SjIdehPu48TuU4hEKQjAYxDbbjbWl9fR048s1a4rKNxAI4OS5p0OWZVv6go8/wltvvl5U3tWOwP+cDh4NZ72GPfEi2Lw3sl7jB9IJh0L66VwgIOe8lphUZG0vvuQMispqAzECAYlfvu6RzjkxdyhxH+Avvw3+6Pzs8oWCOfNhKz5H7OJ/5k0Cm3JkWPznIoN1CWHYCHtf750Psx7G2OcRXWmglBA27yykrZ6dY0iuccaq2OecC+9i/Xc5nqmhqKk+NOqkemCNpGP19K3V2qmXtlVvz5FtrZHrnDFeeEUA9LrXTLflbehSrWRwZr1huiwKTihUjSGWVLChuw/dfXH0JxSomvhmKAWiQUkng1vR2dKEaICYHslOXa7zvZQSmTygGgRU7UDTfcg53JFhVM6gMgaNMzBwMNRuX13PyKVbaXyTtQVrv+pVd406rF740XcWU5eempx8SUwvQb0IUWcjdJbpdHVmlnDUqqZBURmSKRXxhIJYPIX+WBJ9fQn09iXQ25tAf0xBPK4ipQhvYlXVTIKYcw2ABkIYJJ0gDgQkRMIBNEUjaGmJoK21CZ3trRjU0YFBHR3o7GhHa3MUzZEQIkEZQYlAIoBMREgxaAyEMVDOCw6bkO8kKltj8JtTtobkhBcZOmabbX2WlF2GbJ6UTrmsx8IFH7uuGb3NNkXLBADxeAyfrVxpSxs6dDMMGtwgzBrIjiWLFkHTNFva6BJ8K8Wgp7sbX/z3c1taa2sbhgwdWhmBagiRSBSjttralb50SXFewAaWfboUiqLY0rYfN74keVcC/kna8hg25UJ/fz/++/nntrQRI0eitQwGTflgWwcBDABLPTyV88XIzTfHIYcd7kp/+P77sX6d/z1Jaw6DOxD40ck5L9Nufxh8Uf7heZ2g0ycgcOnPgeFDfF1vEsE1FBa61iDUpjyj54wLw4dCvvxXILtnDyHuC5+tBrvz0dzX/egHiFx0Rs7LtE8/Q+ySfwLJwkhgA5kUuV7rO3u6kSro0/S13oa/HNwM3MQJTJWgIINzb7NTLjiJYGu6Hcx1GKSv8x1SS0ho63sth8FuA9WDeqiTelL+ufRJlqOW4De0dS2g1p/DajQF5NC9+SV2c51Dets0M0Q0cXsSi+uzy+0iUgHEFY7e/gQ29PSjqzeOeMrY5o1DliiioQA62powpLMVTdEIAjI1t/tIb93n1u0OFMQ8gpkHd/1Xe998LcOmr8/SHsxxhqSNdqz1yCxzx3oZk+od+RiWNlAdyDaGZSOHG3VaW8hVb8Ya1SSArRZnXhZYXhnkKjzTtdkmQdkehDENnKcX5YTYyzBCUCcShjdxHH19Md2bOKEfSfTHkkgkFaQUBkXh4IyDWJRxEoUgiGUJ4ZAgiNtaW9DZ0YZBne3o7GzDoEHtGDSoA+3tbWhubkI4HEYwGIQkSTX9sVjf94JPPnZ5NA4bNryi5JGXTIMGD8Zmmw0rSf5ff/WVOdE1sP342iVlGhg4zH/+OVfaxAp7js+f95yrL2q0Z/8Y5/GuVq/+oiR5f/Xll660sVVIAJfLG3eg8c3XX7m+hW23275C0qQxdnu3DF995W4bhWCf/fbH1qPH2NKSyQTuuO1m1zhXTyCTxkI+9eic16lX3Ai+cHnxBW42GIFLLgCZPM73LYToE/HiS29AB0Fa2emX/SW7ToF8yQXAsOKN/Pinn0H94zWAoma9TjrnREhTxiMxYSykn52aM19tyWfo//O1QErJeW1W+TjP+t17r1uM/YCN39SS5r02tK3/IFZsnIi/GnhFwqFbPZcoiM36OZdinnNuenIZ56yglEKSpIxhPP3I1kD1o1LGCw3khrPP4TU6sDZI4OpANj1oNn2l1/3OtGxksOse/RD7BRPTgFBc7y23MWYBsEetgBEGmkBTNfT1xrC+px/re/oRS6TM0ighaImGMKi9FYM72tDZ2oJgMGgrY6DgNEazmmZpnNsOVqG5xaaIfPrbjN8LjLoUXsJaY3yt
GzTqrzqRqV78EL6NOq1NOA01DFhWq/5CnGSyFLf+269y2OkpnKtxecniZa2d4W5TYaFpGlIpBclkErG42I9YhJtOoD+WFCGnE0mkFCUdapqJyYdhmSdR4UUcCspoiobR2dqEQW3NGNrZhs0GtWNIRys6WpvR1hwVexCHQggFApAlCokSQ4Xi+da9rGr8IlNF54tEIo5HH3rQlT5l2vTMm1J7dRgFS5CGUe+JRByPPPSA6/zue+zlLrfAZ/faX3izYaUhlxuoX7z+2iv44ov/2tIikQi2Hz++YkTZmtWr8fqrr7jSvb6XBrwxfMRIV1pfby8Aewg648gHvT3uUNKlMmTxS9rWA7HrF16hu6uhbx8ydDPXe/bat7sQUEpx8tzTEArZQyKvWrkSL3gYrNQTyL67gR6Uo69TVKiXXgf+9kfFFxgKQr7wdEjHu/cOzwhCTIUvaaytigTXt2nx12dxWYJ05vchnXsSEAwUX/zHS6Feel1OkpYesb/uaUwggYBM3QHST3OTwGzRcsT+fC2QVFzubX7nu07lsNe93koA5zrNVEuDc2I7/KzpONKhAQfKytw6nol1HIVMqKgDxkEYhwTn+tIAs+UDCE9+6zhJCYEsSZAlyeP+7HWUq83WslFxPaGe5kNW1EvbMr4TCrEPe63WVoMErk5kcmaxjivZzjvT8vcWNg7ikeYghAkBp8ScJojoF+KvSdhRACBgjKMvnsS6rl509ycRT6agMaGflChBKEgxuDWKoR2taIlGEJADIMQeFYPpKkA/BIP3Bc6fYn5ghBJmJLdRh5G/yhlUzgZ0flGX8HhdWgH1kg+M/M2w0ZwVXH+N+i4f/OiHvHiCRp1UFrn4uFy6v0z8T6NeqxuGAZXgQjk44ci9cVmODHP928+CqdSLKq/yvSZuRrqVuAanwlpd1QCkG7XVspsSAqLv/yRJEgglANGJb4hzsiRB5hoIkXVdDdcHUwrOxf7HmqZCU0V4a0Ey6+HRiG7pZsjoMQp7Wmd4PJ9T2eMHqqrijttuxdq139rSo01NmLHPvr7KN+XIq+TMUBQFt99yM75bu9YuUzSKXXffw1OmQjokLwK4tbWyIUILxT133oFQKFSWvH943vloaW0tS961huXLluHBf9/nSt/vgIMQiURd6QOhRNq4cQNuuuFaV4jhrUePwVZbu8MaZ8O9d92BYLna0Y9+XNXtqNVDtt7eHjFmFJm3V1/j513UqxKy3PB83y0tFZDEDkopmpqbTcMCQOwDXCoMGjwYRx93PO6583Zb+tNPPo6x48djiy2+V7Kyqg3SSYeDb+gCf8e9lYUV6j/vhBRPgM7cpegy6WH7gkzYDur19wGrv859A/R5GgAxi2p8377BhfLJoOR8Y/gQBH4yFxhZGgMQ/p+Pof3zjpwSkL12BD36IHf6NEECa3+7Nev92sJliF36L0R/cQ5IKAiuL+ac65xs3kn5ei7ls54yzuVTPuPcjECV7f5ywvCQ8pIvncbBuVXRT/Sd7Nx5ATDDGtrzKA7ZZGxg4FHq+q0k6qltVaIPKTU4ANTBcwCbRtvKNm46zzv7jVzexq57PPIzTbWIuIIgtw6MEArDmsyYgyaSKaRSAKUpUIkgHAgiGJQhEQJJpmiSKJoiQWiMI6loprMM44Csj33O+rb+zigTsb8bbkkrFJxzWDflMsZxA5TXR5ssG8pQJ4VAs1s7AoDN8/sYawAAIABJREFUWK9Rh7WFYowhG6g8Ms1762k+XI+wD3+kOALYCa9Jnt+Jn3ViUOxk0c99ngQmB0SI6fQCn1I3sa1xDjAGxSovAai+VwelYt+OgCzpv/V9p4ghH0BlioAcBA9ycxKWJqOF8kBVRUhrRVWhaCznu/GcFOa5gOjq6sLtt9yElSvcYREPOngWIpGI77wA3TuOsawy5FIebdiwXpfJvV/frMMOR0tLS0GWYV739HR3udKqgSQoBN9++03Z8taYlvuiOgfnHG+/+Qb+fe/dUFV7yMe2tnbMmLlPReRauWIFbr3pBnQ72jIhBMcef0Le+W3K7ailxYsA7i2I/HX2gE6PVEopolG7wUBjElU6WAlWA80e9VsJtLS02OTr9ZC1GOyy2+5Y8MnHWPBx2tNV0zTceevNuPCXv7GFd6s3yOfPgfrXW8A/yL6vsnbzA0Ayldtr2AfIqM0R+POFYI/Mg/bQs/ncCcCYB6V/N+AA5ybxS/J8R3T2fqDHHlw6UV57D9oN9+a8jkwdD+nM72c+P20HSD87FdpVuUjg5Yhfdj2afnUOVEfYYb9rn2zX5SKKc5dBDB2d5Vr3OspUDhtnedplz7hLGoD2b6w9zb8WTbpbiW8JdES4q+1Zn03S7+ecm2RwqcbzeiC4ahnWZkGsDvENVA3qgXQ0+0XU9nPUG/z0v9nIzkxKc+s5vwSyflJ0PyZRZ7QZp9zO3+l5pnHOMIbSmDgUJQESJwjKAQSDEkKyDEqFZ3A0JCMSlME4QzKZQiIl9JXM8X4yPZMTzCCyy9TU3eW768hqDNaAeGemmVsVdUFWUli0/XRI00Zf2UAD5YHvCFuNeUvVwjBkLikBnMuy3O89A9FgvCdw1jSDcHXfB9hltCpgGGMQW2xpUBSD3GCmlR8lANH35BCexASSxABLODEKofgIBmRwHtQXAUbeDJqmIeIjZF0+73HNmtV4/9138eorL0FJpVznd5g4CXsXQWjlM6Eyrl2zejXee+c/QibFHVZv+k47Y8+9Z5R0stbX1+dKc5IyDWzaiMdjWLJoEV6YPw///XyV67wsyzj9rLMRCoc97s6OQvs+VVWxfNmnePON1/HRB+97XnP8iSdh8y22KCj/TQnW3iTi8e339/XBakiVq8ao0zLcyKe/35YebWoqeA/BBnKjv9+rb8/PoKlciEabbL/7PcahYnHCD07Gqs9W2ojmb77+Gk8+9iiOOva4kpdXTZAvOA3K5TcCHy/Nep1212Pg3b2Qjp9VknLpkfuD7DQB2rX3gH/uf1/ntMJMKDVqdT/D0kN4YZrEbx7vhWy1Oeg5J4KM2Kx00jz7KrS7H8t94aSxkH52Wu7rpowHPX8O2D9uz3qZunAZ+i+9Hh2//wlUCGMOhvS6Jquy2COtGO+lTGUIBTgAeCkCeFYFgc3jxCPfUsNKApuLPg6bBl3IZKwRCbgemtDaCG35WH6DsYybExWKBglcAejWCq63zgHOxIlar5OGF0f1IFc/XWuopz7Lj4I71zhsXONVz173ZvSsTd9o/vaKUpUe3tKrV8a47rSSnmcS5p4PJBUFiqogBqHjCAZlBCUKKkmQCEU0EkI0EoKqcSRTKlJKCoqiQmWiLMMRxlq6+Hd5vUuzrdM1jxkGY5o5tzQMvOqjxWYHN/9ye1oNPDwX3lvQYPq+i28I/r7TBqoHufrJRj3WFvwYQTUw8OAoMQHshVyVXAh5V4oO3fteDkH8ZlbCG52QMbkSA4x1YpW2eGdELPmpxatD4wCYBs5VV96Sbs0vUQ5ZoiBU0ieCFCCAJFFIEkUgICMUcnvsvPTCPHz4wfumYoJzpENPuw3xwRhDf38
/uro2enpHGRg2fDhOnnNqQe/76SefwGuvvOz7esYY+vr60NW1MasSfNKUqQXLlA1Rj5C98Xi80YnVGbq6uvCvq/+e1z3JZAK9Pb1Yv34dNC2z5+px3z8RW48eU5BcN1z3L0j6HnKAfTJug/5Nq4qK3t4ebNiwAalkMmO+Rx1zLHbfY8+CZNqUkUgmXGnhSDSt4C0ibyf5mIjHi8itgVwIe0SvSCTc9VsJJBL2us830oYfNLe04ISTTsGN115jS3/5xfkYP2ECthu7fcnLrCYEfn4m1D9fB77IHd3ECvbEi+DdvZDPyuytmQ/IyGGQL7kA/IU3oT7wDNAX838vgbkfIPVQnm0aEJ6WjBuKUSCvnjcYgHTcISAHFu/ZbYV2z+Pgz7yS+8LR34N0/tyMJKn9N0B2nADiIxy0uuBTrPvtXzH4Dz9FpCmKhKJCVRUwllYWeylcPVc+DtInGyGcy5vHK+yj+O28xq7UTkuXlltzRA+yhrAq5dw7Y/QqT+1x2rsqrbwXym1rtCNrBCZJkgDL3lmllLuxBikOBv3giiTgVU1Zqo5zSx41XCX11qZqitB2EoG2U/VBZNTLcxjw+73k8vq1jdde5+HRHgySy7gO9vdrPZdOhy0nq6GWUGFyUCK5vhvOGRgnACVQNRXx/hQkQhGQZURCMoKyDEI4JImiORoC50EwBsRSKhLJBBRFNZ+TWcZOr/DQXs9bDLhX/47M3Xl6yz5u6m+FYPr/1VkbBgDmY2VRyjopORxzXcaYaSgqvgPDWCJtnGfC9ujmBLOc0jaQJ5xGMAbq6RvclFDqeqy3eWu5YI6DnJafAC41yhuagyDf4U0zX2Y6jepEMLXYVMExufFSkhjevSohSEIDoFgs+qGTwxIkiULT3O9h9erVWL16dV7y58J2Y8di7ulnZVVIZ6uTNau/KKk8ALD9+PGYe9oZZfGWa2nzDvmaCdXc6QwaPBjBYHn2bqWktj0VlVQKixctLGmewWAQPzh5DqbtuFPBeXy6dEkJJRKYddhszNx3/4LvF+2oPCFiq7EdWb9mL8MYY1/gYr/6Fsfe4qqqIh6Pee4b3UDx8N7PubShlgtFb49dDmfbKBUmTJyEXXffA2+98bot/e7bb8Uvf/v7uo92IV94BpS/3ZrTE5i/+i7UlAL5vJNLVjbZdzcEd50K5f6nwOe/md+9hFhnkzB2bqtv6Go7LjxTCplmkQnbQTr9WGBQR2kk0ue67Pp7wN/4IHf5Y0aBXnQmEPC71NIjAU2fAPx0LrS/3ZZdnsUrsO53f0PwF2cjEAkiHAqBShI0TUMqpUBb9QUQCICM2Mxcd1jn65mIz1whovOZ83pdb3jTOsu2ksKECGUzd5yl3BKuuURzb69wlRSW+obTU8v59aW3DspEcEh6aEnGmG1/4GpdP2wq4DpL4lkLeXSyBCS7u1mNoNEeBx4e9vl1j2rWnZQbfo2oXNc4xz2LBzB33OvyDkbm9mU4sBAuNJfCCMt1lZmJ4EHF2j2lKEgpKaGflAhCkoxwOAxKAEqB5rCMplATNM6hqgz98SSSmgpVVUUkRMu4aNPpcW6Gki7K2QcWpbf3E2WFlRg1vUt93lsLsLev7NfW0jNbjR9tJoacez6HGVFoU+qEG2igSrApzwcGEmlDGF4bBHC17cdgUxbAZbQJZpmbpSdqhiWSxQJcV2pZJydOBYJpJQcKTWNQVGHVlPQI01xKyLKMvWfui1mHzYYkSW7FkeMfA/HhyrKMAw46BPsdcCAkSSpLh9HqoXjPRRJUq0XSKXNPw+gx21RajE0CI0aOxIknz8GoUVtVWhQTLa2tOO74EzB56rSi8jl5zmkYPaYwj+ZaR19PZgLYLzKNXpkIyQYBXB54kap9fZUngDnnLjnybWP54KhjjsOyT5di/bp1ZlpXVxfuv/duzD39zLKVWxUIyMIT+B+3g7/zSdZL+dsfQV3XBemnc0HaW0pSPI+GIc89Gnz/3aHd9AD48s/zzsPUUXBjH9zqmG+UBlz/HzE9WApifrfeAvLRBwETx7pLKGLeSFIK1KtuAXJ4kQMA2WFbEfbZY7uWXCGXAYBOn+ibBE5ddj2UC89AIqVCphTSohWI/+susI1in3kybDAi550CMmZLEB97BvsNEZ3Rc9ZxXzYvvEznOGci+pFTNts9oi5pGb4BL2La6zntazYAcJPA1t+SJIHo3sAsw9o2nzbaIJKLg2GwbcDK8RfyRkXIb70+arxKasp7tgaRr26LMT2qnCWcbi2jXpS+hT6H0/DIsz04yV6S9mC0ji/E8teZt+lI4pAZcBPHmWSx55e+U6QLL2JVYVCVFGJJBbJEEQwGEJRlSJRApkAgJCMclKBxAkVVEU8qSKZS0GB/f0aXbDUQzxipYQDh0oFaflOHHreawayx5apb1JKDmQRIGoSL/7NuK+OcedZLX1VvyDWGNuqsduAnSlYmNOo5f9QEAZzJWq7SYXkJAEn/h8qZJTU9QbOCOQeQDP0WBRXKPW6ZXDE1TQhnu7lIyLKMXXbdBccceyzaWjugqio0TRN7jDEGTiW74oOUfzoWCASw0y67Yp/99seQIUNt5wrt/DOFYvNSvK/7bm0e0vpb1DU6q/rAkKFDceDBs7DTzrtUzf6tLa2t2HvGTOy598y69+grN9Z+960rraWlNORci1dfs/Y7DB1auj0qG0ijpcVN4q1b+10FJLGja+NGU7FnwEvWUiEUDuOkOafhH3+93DZWffDeu5gwaTKmTd+xbGVXC+Tz50C94T7wV9/Neh1f8TnUX10B+YJTQbYZVbLyychhkH/3Y/D3F0G7/2nwL78pIJP03Kumw49y3SrVILOL4U2+NxzSsYeATB4HY18wr/WDv3DMDinWbYD6t9uAz9fkFINMnwD5p6f6FjvTfJBOnwhy/lyo/7gt6/188QrwK28G/c25UOe/gcQtD9rPf7MO8YuvRtMvzwEfnzYKzJdkzHRPZhI3G0HsXCcxx/U5FOKW/GzKvBLOrY28JL2NMsc5L6LXkN347TzPOTe9nsA5NE0ricz1oqCsCKFtcRMqRanGfsCGFrke6qUeUGljCYOks/72i0whgmu9bVW6TkqFUjyH1ziZjgKY23CK6K68mUJMe5blNcby9JYjRlk2L0rd0Mm43eqoYkTDSKkakooKiVBIsoRQKIiQTEEogUwYpABFJBiBxsNQFQV9cQUpVYVqbqOQfibnnKHSYB7vXuXp7R8MKanj1Va6jefj7Vuv8Jy/mp+C5f14GFw0UHvItTZxolHP1QWrbsz6LdbLeqeSSBPAJIsFWo2hEh+2CBeWHkiKKV9MLvQ9hvVnoYCpKOOEeNK/0WgUgYDb28AAB1yKkpaWFrR3dGDIkCGYPGkSpkyZjHA4DM71PRRISG8XwiKfMQZV06CqDIqqQWUizanANhCJRBAI+A8fq2kq+vv7bWmDBg/GscefUBDBlonotXpXW+EkmAFg6eLFOPzIo/MuO5dcVlkaGFhQStGcJ8kSCATQ2tqGtrY2fG/LUZgwcSKGjxhZUrmaW1r8tXO9+SSTSSQd+9ROmD
gJBx48q6Rybar4dIk7JPewESNKkvdmw4a70pYuWYRxO+xQkvwbsGPoZm5iffmy7KGABwJeYd+HlNkIYPSYMdj3gAMx/7lnbekP3Hs3th49Gh0dnWUtvxogn/V9aJEw2HOvZb+wtw/qH66GdMKhoLNmllQGMm085Gnjwd79BNoDzwBf5WdsZubjRYoZSjrnxK+CMAlrIL0vm0H8FoMtR4AedRDIlHEZ51P5zLNc7/O9BVCuvxcknnvPcLLHjpDPOcF3WTnz22kiZB8kMBYth/rbq4CV3tuu8JSC/kuvQ9vvzgcdPwaKokDT0h5lflHIwjubcjydnyEDz1qGV16aw6tD5FZiIljn9KyqcC8PX/FvtzGBU27huQwQWRbGtSVYE9SLUmRASSHvpWtRMOVmcLsS1SjqpW0BlXkWJ/lbLOpNh1Av7atUz2HkkR4V7fkbYwoTg1K6mzHIYofuK1eUDme6QxgxolmJYP3f4n6jv06PfebYSABV06D0x9DLGIKBAAIBglAojIAk5A7KAXS0BsE4h6ZxxJNJJJMpqBoD5wycFhcGutTIJYt164g0dS3eE+UD+71y86h93f5AwpwWcMM4NQ3reqVOhvdNAg3St7ZhjH3ZIlA14B8uD2C/JHA1TNbyrXxhoZa+1usuZ1Z+yuCci1AlRJC3HMbeFQUODbrizloiE77GMCKvMA/pTzjxJOy8y67pLKxZGhZ6XORh29eCcBBOQSmQUjnUWBKUUsiSmHRRffJFCYUkSQgEAmYYNsY4GAc0piEcchO9J805FRMmTBJ6yAzv0ho+TVFU/PHi32HD+vXm+W++/hpvvv4a9tx7BvTtlB1h4LzbY7Z2bCWGrfe1tbdjiy2+h9WWvYvXrFmN7q4utLa1ecpfLLLJWelvrF4xaPBg/O7iP1ZaDBf+8P/+hGDI/77N33z9Ff58ycU2A4y333wDe8/YByNGlpac3tSgqiqWL1vmSt9+3HgAxcdg2G7s9qCU2upu6ZLFRebaQCa0t3dg+IgR+Pqrr8y0L/77X8RisYp6ynsRwNuPL78RwCGHzsbSRYuwZs1qMy0Wi+HuO27Hj87/6SYx9kgnHwHS0QrtvqdyXqvd+yTYohUInHcyeDRcUjnojhNBd5wI/tp7UB99Hvh2fe6bssBWd8Tw0oA5fwJ0A/cyVbGN6OW6YpADnFjn3yVQkm63FaRDZoJMc38vfjxmMp23Qr3tIfD5b/iSluyzG+TTjvFxZX7wTQJnIH8N8JSCjb/7G1p/ex6iUyeAMwZVVaFoqvBa1IlgYXxqFO6eX3NbxKPcSuRcnmvudKI7YDDPa50W4KY8xIielF5/ldK71th721hLiQLs14ky055R9vT0b2tYTiJJ4IxBM4hgUvjXUctKEWfT4Zb+qrQFWfIvQ/ai/eptl0Hfw5yUrb8dKNRy23JiQHRYVh1FkVnlO57VIuqlfZWybVk9dQ3C1UoCE4g+0mpgYETFyJf0zWg8l77AMrcTY6x1LMt0p7GdA5UkqKqGFAPiyX7IkoRgQEZAlhCQCCRCIMkEQTkC3hRBSmFIKikkUwoUTdMjKxCbUFZCGtANv5znygRre/WcU9r+zXXdLQBuNX4szVw4Y5nWtNr+rAYWGbgAIP1eCQBmen1bVj0DTPI3kAOOubrZTxpdib4WLvW32EDx8PMdNTiUwuDJUGYazJzXDDTMCY/FgjofOTgXxCyDGIQ1j4Nx42Bg3LZTQla5DFBdSSD5f6y8QCyHE4LYTVud2Q6u363fbH+XFJwAGk+HbYknU+jti6G3tx89Pf3o7Y2htz+ORCKBVEoB0zRhdajv7REOyJ4EcGtzE6LREIKyhAAluqeBmDAaYaXFgwmFTyAQwOwjjnLl89STjyMWi4Fxi5LEeCeEmHviFNI+jAmscXh54C1dsthmdel1lAPFlCG8tu33euVXD17/1Yp86y/fuhg2fAR222NPWxpjDI889ID5uzEAFoblyz51eVcPGz4cnZ2l8Y5samrCqK22tqV98/XXtr1ZGygtxjmIVcYYlnkQsAMFxpiLAI5EIhi11VZlL1uWZZx86umQZbsd4LKlS/DKSy+UvfxqAT10H8hnHu/rWv7JUqR+dTmw+uuyyEL2nI7Alb+G/MuzQaaOL12+xhBA0nPC9CSS6//BOllM32tc6kGWmEpHrudizClgWXPrBo0lY3KCAUG0XnoR5N/+2JP89YJzfpjz/LfroPzmSvD5b/jKXzpxdlnIX1M+nQQuOh9VQ+8l1+C7199BfzwOSZbRFI0iFA6BUiIi/3BubRUuw2Bjbpn+nX2O43Uu0z2Z02DKku1a/SrznOFdW8xc3VxTQCiYJUIh6Ws96zXpvTnt7czZ7ly/IbywjaMUX0qtzusF6UuRJg+KzTB9cM4FiaD/Lt/M2Bq2lYAzUW6NVokL5VzzDiTK9RwD8X6sfUi91Ec9odT1IZwwxGHTOXIuIgUapC9gC99svd9r/mNNzzQ/8r6PmmVlnlOJg5qGZRygaeMyRVXRH0+gq7cfG7r7EIsnoWrcNMwJygStTREMaW/B0M52tDRFEZAkPeIiBzi1lCXk0DgTet4B/gb9whiOmO4oJA6mH/Z5V77gnIt89cM6D2+g9OCAWYcaZ2bbA4rT3TZQYniQJlZ+xPwWWfpb5Ma32Ki/mkbjO8yMuo9eYBKiPiueOQ4nieanEVkt8yoF53OonEHlDBrPPrWwTWaoBE4pNBAojEPRNPQlUuiNJdDVF0N3XwzdvTH0x+Ji4qaqrvxkmaK1OYLO9lYM6uzAkI5WDGprRUdLE5qbowiFAqCUmFb7ADB12nQXMdLf14dnn37SU+ZMHgeFEl/jxk9wpb30Ylohninfgehk8snbKabXvZmI4Uan6Yaf95PrvZXynRptfNZhhyMSidjOLV2yGIsWfOIyhmiQwf7xwrznXWlOArFYjN/B3ddsSuTbQGPsODep9torLw+8IDo+/OB99PX22tK2237cgO0nPnzECBx6+JGu9CcefQTffP2Vxx31CbL3TpB/fIq/i9d3QfnVFWCPzCufPDtsC/mC0xC4+negR+4PtJdm3/EMpen/AaY7iQVWwtg5ahkeKUYY53KOL2TkZpDmHIXANRdDPu0YkM2Hla0s/so7UH51OfDfL31dL/3wB6CHzCibPAZKRQJDUaFdeTOSHy/Fxp5ebOjqRjKlIBQKobk5ioAsQQIHgT3klikHIcgU4ShfIjhTuvhNLQcB5yRDHobXsL9ySzn/Ip5ZpYlz5z3O+62QKIVEqacCvxDUy9y9qMewaPoIN3u6AUdjLbVpoBJ1XG9r9Xp6jnI8i0kEZ7kml7FbPjCeIafxXFZ53OVb9RIcQF8ihQ09vdjY04+evgQUVQPXnURkArRGQxjS0YLBHa3oaG5GOChB4gBhlogaknBoQY3oO6yOOwxc6Gjz1L/V07df6+AQW5JYjwaqB8JIwtswghNYHAT1b9Hwos+h122gNtDoK9PIqmWsZsIgY8gNzsFAwECgCfUFOAgYoWCE5uGFIJQOXL9P0/MzDpXDdmgcYNxQUAg7cQpS0n2o8
oXdCyN/Akh8JNb7hNLHcBIxLJ8UlSOZUpFIKK48FEWDqmjQNBF0mlKKYDCAcDiEtqYoBrW3YmhnB4Z0tmNQWyuikTDCoSCO/757D7XXXnnZ9I7LRrpZj0La8JajRqG9vd2W9uWa1VjwyceenYdXGeUkUT2fNcMkI5dlZ1FllvAoJ0T+jjIyGHYUKmOpSV0/h4GWlhYccPAhrrweeehBaJqWtYwGvLHqs888Q/NOmTa9pFa1EydPdqW98fpr6OnuLlEJDVgxesw2aGpqsqUtX/YpVn22siLyzH/uGVfaDhMmDqgMM/fdD9tsu50tTVEU3H7LzZ5GXfUKsvMkBP58ITDYn4e/9tCzUH51JfBFGYnyjlZIRx+EwD9/B/mis0B3mwZ4RFqpWzRFQWfsjMCvz4V82S9A998DiPjfIiFvJJJQ/3kH1BvvA1Lu+awnthgOuuPAfbNkp4mQfjK3+HxUDeoVNwJLVoKDIJFMoaurG319/QABQpEwQuGg6cUDuOfd2eDHEC7bPZnnYPrazLbecuShK94Yge3QDM+YEsw/jfWFRClkQkF4Ok1smyNUu4RYFOfcbSjh5R0sSRJkSSqYCObcftQCDDkzrev9PUs6RlY1Pbu1jhlrKKCqDSVZv3G7F2YlVlf11K4az5I7v2x6AacOLJfnr9c5L1jPWYlfAoeXMjwixpjl2Z5G6G257lVMKFTGkFAUdPfHsaEnht5YEklFg8YYCDgCEkFTWMag1iiGDGpHR2szosEAJAgymHLhGV2rMAko/VC5dxQTzWOe08DAwWvc8Eoz6lDlrEEKVwBcn/drXN8zPY8BWuPcVm8aRFQhZolE2tCl1h4GmoeoRvhyM6lmssA54QHgUtAPRLUaYQQ0h3JBLAoGQAAPiLB93h6KfmG+U8t9zvZghBsBdbeRRFJBb78I8dLT04vevhhi8SQUlYlXw0RtSZQgFA6grSmCjpYmTJsyCXvsuYctL1VV8dgjD5mT2lIQc14TYEop9jvwINe1zzz1pG9lVrYJ+UB1Opy7Fx5+wv1UArVKLhvI9E79ELilwIyZ+2LQ4MG2tG+//Qavv/pKQXJvyuCc48nHH3Wljxu/A7YctVVJy9ps2HBMnjrNlqakUnhxvtv7uIHiIcsyZuy7nyt93rNuIrbcWLjgE3y5Zo0traOzE1On7zigchBCcNKcU11RBL5csxrPPPnEgMpScWwxHIE/Xwgycay/61d/BeXXV0J7oPzth0zcDtK5JyJw7cWQzz0JZPL2ZS+zEiAjNxNhuX9/PgLXXwLpjOOBcWPKXi577xMov7gM/O2P8rtx9ddQ/3KDf8K4BKA7TgSdvW/xGSkq1CtuBF+yCoRQMA6kFA19sQQ2dPUgHk8hFAqiKRpFICClQzny/PY49EP45pOPW+mW9g62whr+2bzWI69C5ole8zxr5Ib0nMq6bqKm4slrrujM3wgJXahsftdJlYaQL/s6zVddpfdwEp7ZrDqf22ttVouohbaVD3w/i9PCooreAXNskVXLqMXn4JmOEnwrmXRNzt/FELuZ9BfZys1WlpMQJkj394ZxlDCYAtxjgPAMjidT6O7tx8bufmzsjSOlCEcfgEAmHE3hADpbmzGkow1D29sQkiVIlAJV+C0UKo/Tq9RKCjdQGeT7jQHpb9gghK3k4qZMRpUSVoNPYfQJM4qVgUxOU0541afzO/Qy0GjUYe3AyXNtKnVnW1nW6oO7OuAKPwIj9vDLFUWGCZ2/W92Wfrmuc58UYRUIFV7YKY0hlkyhtz+Grt5+dPfF0NMfR38ihf5ECilVEWQyITjxxB8gGLR7u3z80YfY8N036GhtRks0glAgAEopuMdkyE9btn7w1snubrvviQ7HXp+rv/gvXvBJzHjl6+da5zMsWbwI9993DzZu3OCrXD/5O9Eg//whHzJ3oN9pIBDA4Uce7Up/Wt872y8yBXGq1bGhEDz37NNY9ulSV/rBhx5WlvIOOfQCe4sHAAAgAElEQVQwV1t5+cUX8NnKgfVKnffsM3hh3vNIJZMDWu5AY68ZMxGNRm1pCxd8gvffe3fAZIjFYnjw3/e60g846BDXnrwDgY7OThx93Pdd6S/Mew4rV6wYcHkqikgI8kVnQjpulu9b2GPzofz8UvBVa3JfXCyCAZDdpkC+8AwEbvwT5J+dCnrAHiAjNyt/2eVAQAYZvw2kkw6HfNVvIF/2C0jfPxRkzKiBKf+b76Beeh20v90GrO8qKAu+dOWAksBs3utgj+feKoBMnwCyXQ6jpZQC9fLrwJesMPew5VwYQqYUFV3dveju6QNjHOFwCNFICDIlICy9urEquoHMJES2eYTX/D3bvNWdzmGEifYy77deaw3z5vS2KRSEiIhPEggo19ehptYpbSKcjfj1JIEJgSzpCu085THAufA8rUbkO610k/+oksV2GplIINs1HiRwrc6xa1XuegXnwkupHuBt8FNd7S3TN+68plTwo1swxvJs+gm/RHEu0tlMzyGzlRR2lwkYb9EghI12TPTogwwEisrQ1deP9V292NgbQ39CRUoV8lECBGSKwR1tggzubEd7SwtCsmwaBnEOMzKkQcybz6n/Z+gtOfwTRZXEpkZc1BuMurMSlkz/20D+KPe3YO0fzbqzHo5IQ2nBMhwNVB3MujOMSuuib9U1/FT0L7YVZS7SoppJIsNamhiWZpa6qojUxHFUCNVRY+mXYLYxIiZfjAOqxpBMKUgkUujti6Orpw89PX0Ih6M46KCDXbnddeedCAUktDRH0NnejKGdbRjS2Y621iY0RcMIBmRIVEwyuZjtZYS1g7ZOomRZxoEHucPqPvnYo1i5YnleT5/JajPbBJxzjq+/+gq33nQDXnvlZVz8f/+L+++7B11dG/MqO5s8Trk2KW9Qkl+45Vp4H1OnTcfoMXZPqf7+/ox7Z+eDWnkHhcA6D1uyeBGefuJx1zU7TJhYcu9fA8OGj3B5fTLGcNtN17v2hy0X3nvnP3jisUfw2MMP4ve//TVenD+vbongSCTq6QX877vvxHdr1w6IDPfedQc2rF9vS+vo7MTOu+42IOV7YadddsWkKVNtaYwx3HX7LUgmEhWSqnKgs/eB/KsfAk2R3BcDwNffQf3tVdDufBSIF/e+fI9L0TDo9AmQ5xyFwF9+geC1F0P+0cmgB+wBbDmyKBnKhkHtILtOgTTnKMiXXIDArX+B/Ksfgh60N8iQQQMmBokloN3+MJQL/wy+cFnR+fGlK6Fedl3ZSWA273Votz+c8zoydQfIPz0V8kVng2zrhwS+AfzTz8S9RMzZjb8aY+iPJbCxqxuxeBLBYBDRpigCAVl4qHKhTjUMMSVJAlCY56/VcNLPPWkv3/Qa3fjtXAGZeehKXrEWsZ9zhnfzC2sYTHMdalFqi/S0wjmTYa5zzmkqzSmFbHmvHiUDHgtOa37VG34490LZ6Y2mO5FlNfge6DmrjfPPXi3iem7/Rmp5jl2d7Sp/eH37Zh1ZO5kaQD2RQk5dRbUgn7frJBzzRb6Ebj46DK/rrGOPNRKFtb+yEcU55LHm67WHsVN2SsVh
9J/EPESCqjH0x4TOsKunH/3xFFIqAzdDRVM9VHSz0BE2NyESkCGBgzANhDFQQs3vhIKCcArOOAjjIJyXfM/4crfdhhdi7YOD61ssMmialnU+uqnXszHnMiKRMr+evSX6rtOcBmzzPKtMGmfQONOd29LXmQYnDVQU2fpkRgCN6MYZnIFxJr7NGq07wjmMx807tlS1EyJpC7PKewI3kB1enregEjgoVA4kVQ37HHAQ2trabPd99tlnePGlV6BqIow0IYAsUURDIbQ2RdHZ1oKhgzowdHAHBne0IRBwe1QxXVFlTG69BtGdd90NI0dubr+PMdx2842m8j7XBDvXoJ3JA7mvrw83XHsN4vE4ABH++rVXXsbVf7sKmqaVdMDPNFmsdhI0P/K2umQvF4485jhX2qsvv1QUsWUsuKqt/kuNFcuX4babb3R9B5FIxPO9lhKHHDoboVDYltbV1YXbb7kJqVSqrGWv+uwz3HPn7ebvvt5ePPrQA3jogX+XtdxKYu+Z+6C9o8OWlkgkcOtNNyCRiJe17JdemIePP/zAlX7o7CMq4v1rxfEnnoTWVvt4u37durpuC9lAxo9B4M8/z4tMZc+9BuVnfwKf90beRkbF9bMEaG0B2XUK5DlHI/inCxG8+yoEfnc+pJMOB529L8jkcUBne4H554mmKMhWW4DsNBF01kzIP56DwNW/R+Dv/ydI6v33ABm1ec5sygH20ttIXfBHsHmv+7+pKZrzEv7pqrKSwOz51/yTvxecJn6EgpB/4ZMEvux6kwR25UmEF04imcKGrm5s6O4BUzQ0hUNobo5Akqi5C4wXgZsJmeaf2RSZ2UgzkZ7WsqTDQ/ssF2lFfaGK1Ezfs0ECO6+z/vbKx/pbliQbwe6kIHLJapCOlUYhXJqtThjTtxCqLMz2UqDBt5MErmXUw3MY35tB+Nby89RDfVhR6WcphfOWcHzwVy+lel4nkZtrrumXaM6Uf7Y8nfeZIaKt5TlebjZ5CaUAIUhpDH1xsd3c+u4+9MeTSKkauMZACBCUOJojAQxub8HQjlYMaW9DczQCCkH2imKFQpGA6n9JzXvoWQ3aKv39NJA/OCVmOGPVIBIt9bkp6OYygfM06VttDrWcCxLfKheDO/S3da3RQHXB8Mo3AgxxIO2dbxgM66SwaVhc5bAaocnW1HqwQgUAQtIdY/VXx6aLbO3MaIvBUAiHHXEU7rr9Vtv5+/59H7afMAmRcFifdBLIlEDSlSOUiHYQkAmCAcmVf3M0AlkOQNFUcE0DBcBJen8zAJAkCaeddTauuPRPJhELAN3d3fjr5ZfhnPN+jC22+F6xr8H1zF1dG3HLjddj3brvbOcppTjhpJNt+7CVGl55eimoSl12rfc51YJRo7bCjjvtjHff+Y+ZpqoqHn34QZx5zrlF51+u+q803nrzddx/z93QNM117uRTT8eQoUPLWv6QoUPxg1Pm4JYbr7elf7p0Ca75x1U464fnoampqeTlrvrsM9x43TVQVdWW3tbWjsOOOKrk5VULIpEoTj3jLPzjr1fY6nzN6i/w9yuvwA/POx+tDsOjUuC5Z57CU48/5krfZdfdsePOu5S8vHzR3NyME04+Bddfc7Ut/e0333AZKGwy6GxD4I8XgD3+AtSHnwNR3X2EC339UG9/GHjuVQROnA0ybYcyCOZvzCTbbgW6zSjzt0nOfbUWfP1G8HUbgY3d4H0x8FgCiMXBEwmQWAI8ngBiCaC3T9zc0gw0hQW5G40A0QhIUxhoagJpbwUZ3AEydBAwdBAQCnoLVEHwJSuh3fYg+Jff5nUf3W0apDlHgX+4EOp17tDttjI+XQX10uuE97iH8WGhYPNeh3bHIzmvI1PHp8lfA6Eg5F+eA/XS68CXrcp8s04Cy784xxU6mpD0PoCESiCEIp5MIp5KQpIkfa/gkIjok0zlRQIbyPee7CQwXOdyXU8IEUQeYKyU0+l5ro8JIcLCmgsrciNLQtzzd6/8M82xzHxthqsG6e0PhuFsrcDt8Sz+GOSp4XldETiKZSjAsh71o38B8v+OqwkE6boQ32ttPocVtVwfTjSepXj4dVbINBbl6qus45lvmYz8eJrE8crBOZ6LkY+YhupCaU/Rn1QRS6mQKIVECSIBCZIsQ5YASaKQKCAHImiOhqFoDMlUCsmkAoVpZp5Mj+BRai/gSsLL0aOB2gEzvj0jgXNQRwut1zq1bjpT7RrIfAxgC723gfLAGFdytTF7iHYxcFHLmF7t9WhqJwr5nKyTg2pavJhWMSAgHEXt61RuVGtYm0rCOSnedbfd8fKL87Fm9WozrWvjRsx77hnMOuxwaPpeNyJgadqrV1jKU8+9r1qaoghHIsJ6Q+NIpRSoqgqVC4s5w8t28OAhOGnOqbjxun/Z7u/p6cbfr7wcp55xFsaN91buOi2z/EyIP/n4I9xz1x2I9fe7zs06bDZGjx7jcRfKavbk1UazLSAa7biyOOyII/HxRx/aPEc//uhDLF+2DNtsu21Jysi3XVcLnJJ2d3fh6Scex1tveHuCHTzrMOwwYWL5BQMweeo07HvAgXjh+eds6atWrsRVl1+Gs889r2RENGMM859/Fk8/8bhrrzBZlnHKaaejubm5JGVVK7baejSOPPpYPHj/fbb0L9esxlVXXIZzzjsfm202rCRlaZqGhx/4N1575WXXuREjR+KY759QknJKgfE7TMDue+6FN1571ZaeTNZHGOhCxyfp8P1Ad50C9bp7wD/NQqJZ8c06KH+9BWSbUQicevSAhGXO9Hyu8XnEUJARQ+tItZUZ/PM10B56FvzDxfnd2NIM+ewTQCZvDwAge+wISWXQbsruEc+XrYJ6xY2QLzyzJCQwe/YVaHe5DUecIFPGQb7gdO+TwQDkX5wtvHxzksDXCa/h7ba2neKcmwaIjDGAEBGuTtWgqHHE4gmEgzKi4ZDwykmloKqaUKHmqRDJRuo6z/s5Z40Ek748HQ7LnU9aqaZypq8pIfb39duHcLs8XA+TLZTd1vKoS9mejRQ2PKeMtbfhEZLLG8R6jjFWMe8RqxeyH0NgL6JEkHNpZQ0lpOxRvziQkWc3vYDNC/MBAVBdepRC4Gy7tfgcXmRXLT6HFdZvvdafpdrAxKebt9GHH93fQNZVPuNpJu9g67156Yi4XQdt6pkc9zn7X0EY21X1Rt9D9XOapkFTgVRKBaUpSLKEYEBCQBI6QkKAkEwRlsPg0Qg0jSGRSiGlaEipGjTGTNlq8dvJJXODgKoteNWNM+yxMU/Ndk8twuVjSfIOuFJ1YLD2p/rTefSTUs0/afXCNNA1+kLrGFNQXvZ6tK4Zq+lbNOcsVvuRfMNeGYRbtTyY9WMihEMmAOFWJ+7qgXVC3ggDIGDzOIXwxD3muO+7rnv+2WfQ3dVlW9gI63qx11VKURGLJ5FS3B47qiasNSRCIUsUTdEw2lqb0dnahMHtLRjS0YrO9mZEIiFMmjwFBx1yqCuPZDKJ6665GrfdchPWr1snEs29Qyz1yRyhOjwWYslkAg/efx9uuv5aT/J3z732xv4H2vdDti3oPD49juL
2ArG/V/97yDRQWXR2DsI+++3vSn/ogftcZF8pkE8bqQS45TDQ39+Pxx99GBf/3/9mJH+n77QzDprl/u7LicMOPxLbbT/Olb72229w6SV/wFOPP1b0fqwb1q/HNX+/Ck8+9qirPRBCcNLc07DNttsVVUatYK+Z+7j2XwZE2OO//PH/leR9L128CJdecrEn+RuJRHDaWecgGBw4b0k//foRRx2DIUPK6/VeDLyewe9RVLlDByHwfz+GfMZxQCTk+z6+/HOkfn2l8B79dp3f0nIc+aFa++dygn++Bupfb4b6v3/Nm/wle++E4JW/NslfA3TGzpDPcs9HXWUvWg71ihsBRc15bTawZ171T/7+zxnZL8o3HPSSlbZka3hHkxBF+tviHIgnVWzs7kdPbz+oFEB7azMiwQAIZ+BczMXFeMwBGGkMxEL2cY9/O5Ftbus3DXqI6DTsPgaMiAV9msBNh+Kz5pdJDitpDONdOc8TAphBxnRvJlBPQtf6m0CQnhJNK7LFHsx+ZaP683u8lgFAus24CTenzJ6Eg0c+pXwUay9rkrtZuk+jnmnBQogCeDaWucrhZfRczToNs0qFRQIyfQzV/Ax+UG86ppI+A09/t9bDum7M5g1ELeQvI06vIAHbrM3RzvIdx0oNrzmyNVR0rusNGEZI1hDTme51n7Dka+7fx133uYyAOAcoAad2mSSqb7VgjN0EIFREhFRVDf2xJLp641jX1Ye+uApNA0DEFhYBmaAlGsagtmYM62zF0LZmNAdk4S3FOMDtVD/hMENI25F7DlOtsLbJQmSvteetNwgv+PRhhBsuPMPSyeYEyda5WmD1fC58FVx9yGUAaRxGHWqWdUkDpUG5+2gjhLv1qGQfabS5ym44N4CghCBtx1XdaITocGPs9uMwYdJkLPj4IzMtlUrh0UcewtzTMiu9Mr27nt4+qKoGSqnYu4wSyJIMSZZAKUApQUiSEQzIYIxjzik/QHtbK+679x5XXh+89y4WfPwR9tx7JqbvtJO+bzA3TdS5zs96WUevXv0F3nz9Nbz3zjsZPax23nU3HO1BgNvaSRkadkGWnA1UBfY/4CC8+frr6OnpNtPWrF6Nd95+C7vstntZy/ZS6lUCTgm+W7sWixctwKKFC7Bi2TJX2GMrDj70MBw867DyCugBSinOPOdc3H37rfjwg/dt5xRFwXPPPIW33ngd+x14ICZPnYb29o4MOdnBGMPiRQvx5uuvYfHCBZ6GAJRSHH/iDzB12vSSPEut4OS5pyEQCOI/b71hSzfe93/eehP7HXAgJk2dirY2f/unqqqKFcs+xauvvIyFn3zseU17ezvOOe8nGDp0s6KfIR/4+TZD4TBOmnsq/n7l5WUxGvGLah1f6MxdEJw2HuqtD4G984nv+9hr7yL12rugO06ANHs/kK23KKOU2T3o6hqrv4b6wNPgHyzK/972FshnnQAycWzGaRXZayfIANQb7stwhQBftBzK5Tcg8POzCvIEZs++Au3u3OQvJo3NTf4aCAUhX3QW1EuvB1/xeebrUgqUy65D4BfngGw/2l/eEBbtlBComoae3l709QHBYBDhSASUAqmUAkVRARBwnULmDGBE/6WHNja81oDc3lJe5/2miXQjza7czTbP9eNBm4kU9lrj2a3P7WEn/cypZFl27Pfn1+gjfe3AgcNQ33nVTeG5GkrC4p+FO/4OBIx3INpW9Y5/9YB821u9jKH1snYv2XNkIG0LyipDk8rV1nJ51VYKfvpmp2dT5jE2e7r1HRDi3L6Pmx2x0yCKcZ6zt3e+R6usiUQCyQQgSwSUSgjJEuSADFkS+wAHAgG0tQXQwjg0piGZVJBQFKiaBpUxgFAQKoFyaysSYyon3FZWpeuzUPjxVreiVp+znsE5h2rOjwDodeTbszQ9ZSutXJnSjbkQKV/ZtQZrXwkA3NoX6i+okO1HNiUYvXSl2ECj7jTxA4BhzEAGfCubnBqJYgatahrwCCGgHA4SuDpkA7Iv8PMdfOsVRx97HBYvXGDbs/HtN9/AzH33w5Zbjsr8nrwMDiUJTFdSEV3BTUjK9GaQZcm0rKeUgBKKo448AiOGD8M///lPW3hdQJAFL85/Hi/Ofx6DBw/BxEmTsfkW30NrWxva2lrR1NwCVVUQ709g7dpv8OWXq7FwwQKsWf1F1meeddjhOPDgQ7JeY1g4OjHv2Wfwdmtr1nuLxeQpUzFh4qSyltFA/giFw5h9xJG4647bbOlPPPYopkydhlDY/36ezz/7DJpbvL1kC4bQOJo/J02eklc7+ve9d4NK7r2909lzJJNJ9PR0o6+nF729PUj48OQMBIM46ZS5mFJBEjQYDGLuGWeh89GHXeGgARF+/uEH7sfDD9yPUVtthQkTJ2PI0KFoa+9Aa2srItEoEokE+vv78M1XX2H1F1/gow/eQ1dXV8Yyw+Ew5p5+FsbtUI69SqsbkiThB6fMQeegTjzz5BOu811dG/Hg/ffhwfvvw1ajR4v3PWQIWtva0N7egUAggJ6eHvR0d6GrqwsrVyzHgo8/QiwWy1jmiJEjcfaPfoyOjs5yPlpR2Grr0dj/wIPx3DNPFZxHXc9XWlsg/2Qu2EeLod78ALChO/c9Oti7C8DeXQCy/RjIh+8HMqE0ofmBTTukG/9sNbRHny+M+AVA9tsd8vGH+vLuJnvtBJkQqNdn3xMYi1eIcND/cwYQDPiWhT3zqm/yN/Dzs3znCwAIhyD/8myov/tb1v2QiaoJEviX54CM9UkCEwLOhMaGEOE0k0gqSCRTCMgywuEQmiJhaKoGRWNgnNlCRHMulL1ip9vsXlF+jROzpVnT00mZQ0ObeehKaus5apxykLdGntY0b0NfDos/GaB7AttltCuVrecMD22DQBcepbmIElHuQBFDTg/XUhsL6qqxvEjgjBIMcLfpZZQAVI8epVBUBelo9bqsnBRVAcPjxRrRoZr0dfmgFHKXqj1YpeA+iEmbDJzbnqXSdZGP0ZUf5Bp7nWOivUjDi8LtpE8dcWDzNRYzIkcoDAATYZ9JIgVZpggHg5Alqu8jDEhUQlCW0IwwVJUhpapIKMKgjXFukgum17FHmbWOTM9S6fbagH9wwPyQGDy+RWfPVc6qJXYyzmhetmlio2l5whbpxzaKEVv/WU97mBcKI3x4NfbEYh3JdNvfdN2RPLZOKgS+TNL9TrAyLUqrBYQQSCAW9+vqkw/IPlmo1Ul6KTBs2HDsNWMmXnphvi39wX/fi/+56Fe2tFxt0DxPrfYyBFxX6iQUBsI1gCjCih6ATAmmTp+O3/7fb3H1P67G2rVrPfNet+47vPjCvIKe0UBbWxuOP/GkosjVxYsWFiWDHwwZMrRBAFcpdtplV7z80gu2vbO7u7swf95zmHXY4b7zWbRwQTnEs2HIkCF5taN33/lPyWXYYcJEzD7yKAwbPqLkeecLQggOP/JoDB06FI88+EBG8vrzVavw+apVRZW15aitcNKcudhs2PCi8ql1HDzrMAwaNBgP3X8f4vG45zWrVq7EqpUrPc/5xYRJk3HSnLmIRKJF5TMQOPCQWV
i8aCFWf/Ff98mGhxIAgE4eh+BffwPtiRegPf5CXiF/+ZIVUJasANlqC0iz9wXdqTGWFgK+aDm0x+eDL1pe0P1k/DaQTjkKZGR+3vhkzx0hE4jQ3jnkU664EYELz/RFArOnX4Z2z+O5BSiE/LXIlI38NUBUDepfboD8i3NAttsq9/XMuUYRvxnjUDWGvv4YwDkikRCam6PQGEcsFgPTSWMYBo0+VuqZ5vm5vI1yX29E6clcptdJQwFMfSh8M6/3mOUabspivc/rOQyPZFmSwBgTUa+4X+VzmqQu5xrTVO5ZFVdlKM9QcOaVbxUOZen3tOmu+4uF2/yi8KquVt1WoagXfVKhz1EOZXCxZF+11Ek+MvghBf0QszkJW7jHTaZ7B2cikrPJxBizGU0ZZDChFIrKkEr1m1E0wgEZkUgYVCIA0xCUCYKBAJojITDGkAIQjycRi8fBOAMh1My/HryAc6Gen62eYYt+oH83hmvFQNanKUejCRUF8R7TEROogwzeFMHBSxblo2ywRL0w/hJwzy1lnFEwCp1z+I5Jls/Et1pCgGYCBQEjbkuyakGu91fueOXVjENnH47/vPUWYrH0PrnLly3Dhx+8jylTp7neS6Z3ZaZnUASJcC5iPyZjUphkQFLVMHjYSPz+j3/Eyy+8gCcffzyrp1e+oJRi5113w+FHHo1oNOq/nivWHNzvtzEJrA5QSnHUMcfhH1ddaUufP+957LbHnlXtfTiQ2Hr0GBx2xFEYPWZMpUVxYdfd98SEiZPx9JOP483XXytpON5IJIIDDzkUM/bZ1+YJsCljp112xbjxO5TlfQ8bPhxHHXMcxo4bX7I8S4VMfXYgEMApp56Gv/zpEiiKMsBS1RACMqSjDoQ0Y2codz8O/vZHue+xgK9aDfXvtwGd7aC7TYW0x3SQLfI3yNik5oXru8BefQfaa+8Ca9cXlAUZuRmkE2a79vnNK489doQsyVCvuTP7hYtXCBI4RzhoNu91X+Qvmbgd5CLIX/WqW/zfkFKgXnotAhedBWyffZx0GQKLH2ZoZ64HvOqPK0gkuhGQJURCIcgyhaYxJFMqNM5sHjQ5nyeDh6/bq8hNuvoJ4ezHk8iaxnl6HVfofFjcb8mfE1ONkdmDOP3c4h7hUey3X7C+s3LM4wcyOoHY2jk/T+BKw/u9c3s7qGEMJEngNDIASvf+6oXssHqd1vrzVEWdOMjKWka+Ol/rPV55+CFmc40PVh8uI5nqm2GyDN7TWcukEqw6LOM+c3sHKon5CudIKBriSj8opQgGRHTAoCxDRIomCAEIRcJoi4ShMgZFURFTNaRSKWiaVhffWC7kmmfU+/PXC8z9gj3muLSEMyrOue+9bInBaTaaUF5g4Ob+yYCoRyupWM/fZNpICBVtN8UYqDn37iYcIJbxqph5T96bUvldOPvxZq0UCETce64P/IxUn/K72kn0TKRqudHU1IxZs2fjgfvsHhcPP3A/JkycBFmWXbJkHa5sIRQs7dajTVhzIUTGPvsdiB133g1PP/m4i5TOF7IsY/pOO+OAAw/GkKFDHSJy828mhVY1DYoDqeRpIDu23W4sJk6ajE8se2crqRSeePQRnHLq6RWUrLIYPGQIxo2fgImTJ2Pb7cZWWpysaG5pwXEn/AB7zZiJJx9/DIsXLsi6f3EuNDU3Y++Z+2DvmfsiEomUUNL6gPG+99hrBp58/FEsWbTQtu1Avhg0eDD22e8A7L7nXmUj2q2W8aXGsOEjMPvIo/HQ/dn3O20AQGc7Aj8+BfzgGVBuuh9Y/VV+92/oAnvyRbAnXwTZfBikPXcE3X060JF5K4dyKrqrDj29YO8vAnvzffAlRXjiN0chHXkg6IF7lkQssusUSIxBu/bu7Bca4aAvPNOTBOZPvwLtntxhn8nE7SBfdHZBsvJFy6H++dr8b1RUKH/8FwK/PhcYl9tYyqsNGtF0TFkApFQNKTUGQoBIOIRwJAjOGBQlBZWlyVQA5n5/mTxqfEf+yeKZZP9teAcZKWkC1p2vILYJMZRaQlltKFuofh0XD+D5npxzfaLnkybTKYQ+2k7UMuZNDFNCQCUCpgklUK59r6x7IHvYxxYMQTSVJq+8yhWFu+urSrtH73ZrtHWgHjyBixmjOPS649573Tm9Bc1/l9GYodbrA6ifZ8lqsFO+Qm0/S/EGq4GYz1VupvEdyOx44fTSzeY8kOl+1z16RyDBUrbnFgweBDUR4xJ1PIstyobYjyLtncU5EkkVgAqCBGRZRiEioEcAACAASURBVCAQQEAikCgFpQQyJQhGAojyIDiPIKlqSCSTUFQNiqaaTUaMQ/q8wSC2dYcT4xE4t+gwa/vzrA4jjQYKgvE9GISUkwjONud2njNJrTyaQrXO2WoNnHNYtVnGa3V6mlbldyq64pyGAIbTXrW0mVK+S06sc5m0oQaFg+sy12+WsVC/g3AKwkn+BLBNkBqfNFa73PmSwAO5qbVv2Xjp28neM/bBKy++iLVr0+HrvvtuLV56cT72P+Cg/ORzipvDQtGJlpYWHPf9E3H0scdj+bJP8dEHH+Cjjz5Ad5b9Ng00NTVh7Nix2GWXXTBt2jRIoQhisTgURTGtDiVwEH2vU2NPL0rTk2dT3uq1FQDQmPhVEocfeTQWOfbOfuc/b2Ovmftg1KitKihZeSHLMppbWtDS0oq2tjY0t7Rg2PARGL/DDjUZ7njY8BE44+wfIh6PYfHChfjkow+xeNEiJJO59zZu7+jA2LHjMHnqNGw7dqxpKNNAZowYORJn/fBHiMViWLTgE3z80YdYsngRFMf+714YPmIEJk2eiomTp2DzLbYoWhZnyJeBxt4z98HCTz7Gp0uXDHjZtQgy5nsIXnoh2KvvQL33SaCnL+88+JpvoN77BHDvEyDbbQVpxi4gE7cHaW/xvr6KDQaLAV+7Hvy9BWDvLQBftqq4vGQJ0r67QT76YPBouEQSCtDdpwGEQPvXXdllWLTckwRmT70E7V73HuROkPHbQP5ZYcZb/NNVUK+8qaB7DSiX3yC8mH2QwPmAMY5YPIF4PAFZkhCOhNEeCiCeSCGppHRFLdWvFaEVJT3csRN+iWDnNZkIZed11tCRllxheGuaim6HloJYwnx5IZNFd6b7vDyePPOlBBLEfsyMMb2M8ho+p0lxwKK2GXDotVJTnsDeqB9PYKDwNSHJ1IzqdPwbKNS6Pm9AMUBtrVb1Jn49cP3mk228zla2iIRqXOMm02mWMSmbV3O6XoggdVUNlACUElAqISRLCAYDoEQo4UMyRUiOgBAClQGppIKEkkIqqUBlGogspY26iDDYAgSRbPd7rn00HETqA7a9g/X5HfWYn3h+R43qrhpYoxYZqObvMfP0z0H6Vu8jlAXW0N/plZ193SX4YMu3yfXa7+uLQSsi3GGuBlPNyikO0fg57JZj1YRC399AfMilqNuBCjdWSnh7AQh0d3Whu2sjurq60NXVhd7eHoRCITRFm9Da1ootR22J4ZsNgyRJkKiw9BfeDSJcm6oxqJoKpqpIKioUjZlhHIilfC+v4GqC33qt5gGngYGHO
SmpsBzVDlVV0a33MT3dXejp6UYsFkMkEkE4HEF7ezs23+J7aG7xJo0ayA+qqqJr40b0dHejq1u882QyidbWVrS1daCtvR1t7e1obm72vL/Rz22CiCfA5r0B7blXwbt6i89v+FDQcWNAx44GGT8GaK2zb7u3D3zxCrAFn4ItWQl8u674PFubIe2/B+j+uwPNTcXnlwX8rQ9zh4OGTuTqJDB78kVo9z2Z1z15Y+UXUP70LyCZ3YCFR8MgsRxGRQG55CSw2xsIkClBICAjGAyBUCCVUqCqurcrYIvakwt+Ilb5SXcmpa+xym+obInw2jVSTQYvt0zec3p3OGevCADZfjOL8aiVpM3+fjKe8pBbv8e0TR1I0+TsqP3xV/e+qaMtO4qukypa+9Z++0JFvU7LglI+i3OM4gPjZVQrdZJJD2XzqM0SHTKXHquY85x7dBXEeY2//DM9Q1onJ0Z+WSKQZRnBgAyZUoAQUJM6E1teaEzD/2fv3aLuSa76sN+u6u7zff+ZAUlIgASSACFAXBxjcbEhILPsB+e6/BCvgE284iQgLCBOfOEOthNAICDggACBV2KzkofkIQ9+IA/xCllZJCEGE0AzXGYkbho0kkZoJM38/9853V2181Bd3dV9+n76nFPdX/3Wmvl/p7u6eldXddXu/dt71z7LsT8ckCsNtlk+yr2Nr+k+dVl0ZjgMWAVaeyv04XrhRJHaf33QPN1vitoUHIZaLySAvdJ46r3P4UN/+vHlCGAXY7yufQQXceU2PUHlN+YPTn2G51xQl+7fpWS9xLhrI4Nb0xCVCmR5JaSw+4kQpIzK1G+lMkQErRl5nuNwSJFlxmtQt+RBCGRwwJrR590VEOADwvwUMAZt0Vr8q+9G/r/+Evj3TotkdUGf9inA574B8gveCHrTZ62PEH7pEfh33wv91DPQv/Me4NkPLFY1fdqnQP47XwP66i9brM4x4F/5DeQ/9QuD5ehLvgjis19/dvKX//BZZD/w06C7AWL3TZ+N5L/8T5C+8xeA3/zd/rILk8CucbVa7wtTKRHiJMHtTQIiCZWmUEoX6cxmRhL2pLCcd9yQvn1l+wzkfTi+TtSODxnT61FLxtnURgK7EctddZjzPc+5I0SpTHkNS4f7gS2s4VX095UFWQiT+sTj74Ftja11taVrVJzUDmZvbIGWCF5jZLC7lljR62mg221op5G+7efscc1F9GLLoxxTr+0PpQFB9ZTWNmsHF3b1SEhEscQukZBSIqKK1rX5KTIG7u72OKQpcq2hNdccDNafxWIZrG3s3xeMzSIR+s8/jO07N7rUmcIuE2xY/H9LGREuCdKMgzYE8PMfORMBDMzxbPYFAjmXwdQArq/0ucreUs9ujRG3wHy5Lz3m3P5q834EclRTqSjTFwAEQUAiCDKSSOIIskgB7YIBKM3YpxkOhxR5npep8IQ4jhLwCV2R021p7wLuF/pGrc9jOmD9CPNNwFIYSqfE73sO6hd/Cfr//NXlb/7yTwC94fUQb/wM0Ge9DvSG1wFJvPx95uDjL4Kfe978977noH/7GeB9zy1+G/qCN0L+lbeAvvjzF697LPj//U3kP/nPF6nrpMjf970f2X/1k8Ddof8eb/wMRN/5t8uxkr/jXeDf+r3B6sfuCTyE5ncOEUExg0Rh9rT6LTEev32A25sdcqXxcH8H5nkRUmNJYCtPv45akdZLyeLe35WjSQA3y/Wds06l9m9DBPfLZq7t2ZOyw3SgmQGq0pOJq39NG6x9ra/G4/rbYjGqHSv4BthSf6ytLUsSwISWOdQ5dw2snQC2crsJDKp1qWqPbeMU2+cQ2du2SLG20bbz67WpoInqhHbN7oeKvLWWPwngZhdjt4sgRZVFhJlBkGACcsW4SzPc3e2RKTUqc8h9QXgG20Dox+vj1Kh7u//6uftSM4/OuEGMwb2Ct4KxbWVmZFrjyWfef74I4FYBO0gfH+Gmx2ISXrrYLvn8ln5pAxHcjrlp0gUBsZRFumiz75mUlcIIGE9GpTTSPEeWKWR5Cq0ZWhu/eyEEKoq5R1HtMX6dGyFC+P5izlvp8xoScD2E+SHg8pg55l58Cepf/t9Q//v/A3zko8uK5OLTP9WkZH3Vy4FXvBzik18BesXLgVe+DPRJLwc+6WXL3SvNwO//IPDc8+APfhj8/g9Bf/B5E9k7kH74JDz2APSWL4X86j8P+vRPOd99JoD/1W8h/2//2Ul10Be8EdHf+89mkfj8Jx9E/l//FPDSw/57vOG1iL7zbcDNrnY8f8fPgX/rspHAQ2AAYIYUhJskQRJHEEIgyzJkShdb+gzrtq5eW4vSb+i7p0YFU2PTUi6/1Kk41763a18Gofbf7aTw8HVmX2CttYmIGhG9wWwsDn0ltec++lvREypSSPeOY1+hAYAqI6KL8it3hbr+Gp79EHwmgeeMiL62rGWsjVmbfEc1N/GRedWuj+eI/B06zzw8rtquPdW+LQQhEgJJJBHHEWRBkBOo1GdyrZErhX1qtoVTSptNC517a61r76xLQHMpJ5mGrnPoDGKMnXWt7819QOib9UM0chWc8k4yuMu/NGAitNbIGXjymT/B8x958fwEcPPju22h9HVCZjYDb2v7Ag9hqb7wQZEeSm12CYwd302lzU2ZxswgriJ8pRSIIokoMt6DggAiUXkhQjTSRqdQTMbbH2hVEtvkduW6NKY8s4D1Yak30Id5JuA0hPc4YN04ffzy77wH6v/6dehf+fXBaM1zgG9vQMV/ePwB+CYBPbgFbm9Ajz9mHCEf3YHvDqD9Hrw/GLL30R2wPwD7dJBoPAfoL3wx5Fe8+arRvn3gf/0k8h//7+Zd/Pmfjfjvf8O8CO4PPG8ifz/+Un+5138aku/+ZvCDm9bTvpHAVp/WWoMKA2gURXjw4AGEEMjzHJxnRcSMcPRcwGodfcRwL0kwMmK4Ou4mK3NTX5rfbQbwofr7jNfNeaieYrNZ/vi4KtJCa338zVKLEhggin3a97cPW9I7hNhOW7bSkq2ML7vn9LVtAi5OmV+ajhLA+sacz+T8ECqicii+RvSuYRbNzBdt/Tt0Xde5vtHR9j602fvGpZOujkkhEUuBJI4hJRURwpWTDwHIMoVHWYY0y3HIMoAIgshk3bCggkTWBJDulHnLuC/t3DJCH64fTQe/TgfX4rvJncbGRv0G9OPqBLDF2LQe14ZVPjT5sPV1Hed+dqdOur72rWv8uQTmOjjYtNBW4Ws9z4woMmkBBRGSSCJJYkhRJ3hNewlZrpCmGQ5pikOWlc9CCGGMZeaKowhhHxAihNeLS7xtvs43Ad0I72rA+rH8GNa/+ltQv/xr4F979+J1bwH0WZ8O8VVfBvmVX9JJXPoE/o3fRv6j/3TSNfQ5n2lSMs9J+/zhjyD7hz8BfGyA/H31qxD/w78DPP6gt9jodNDf/TbgTcuRwEMGVpsSzHqcM2s82O3w2INbKNZ4dLevkZlHkTETo3u7zo2tx/y2KZftOT24Do4xKNcPMazxvO+6tnpMJHDDAFP7Yf4RPTL7Hv3rYis6iCVU1gw7zvrG1pqw
gaILpnM7h93ZzDhTE6LVEqn7i7f6CRnoWqqirus6eXl8nvuTnZqK+vh4ODA/oPGGixLK1Wi5vXrwEAQkJDOW8wACiQy1FTzbxr0JCh7aprfX09Mu+ko6amGuo2NTy9vBAbF4fomFizhw6WsAsL5Kiurmbez/M6bWxsRE52FgAgLi4eXt7eUKvVyLyTjpKSEiiVrfD3D0BgUBDiExLtyh9cUlyM7Kx7qK+vA0mS8Pb2QVJKCoKCgkEQBGqqqyGX5wMA4hMS4alXqrYXKqUSBQVyVFZUoKa6Gp5enggMDEKvqGi7yrYWbsnF1RULFy3Bt19/iebmZmzfugXLVz5rtdy83FycPX0KAJCSmobBQ4ehqKjQ5nqxdYORNSFNdHxRbmpsRE5ONgAgKjoavr5+qKurRUZ6Omqqq0EQBLy8vZGa1hv+AQEmz1dXVSEz4w7qamvg7OKCgIBAROrLsRUtLS3IvJOO2poatLa2wsPDAyGhoUhISoajo6Nd7SktKUZOdjYa6utAkCS8vb2RlJwqWner9WpuRmZGOmpqaqBSKuHh6YnwiF6IT0h8IOHUFQoFsu5mAgCqKiu567dv3oCzszMAJqdWfEKi2TJomkZhgRwF+floaKyHo4MjvLy9kZiUYncfmJsP2Vn30NLSDFcXVySlpAIANBoNsu7dRVlpCVqam+EmkyEgMBDJKalwtuDpl5+Xi4aGejhIHNCnX38ATP7jjDvpqK+rhU6nw/iJkznDC5O2yuUokDNtlUqd4OPji+TUVHh5edvcTp1Oh/zcHNy/X4SmxkY4OTvD398fySlpVg011G1tyLiTDgAICg5BcEiI2Xu1Wi1ysrNQVlKMpqYmuLt7wMfXFwmJSaLvuX3zBnQ6HUpKirlr8vw86HQ67nv/gYNsbqegzOL73LUCeT50PE+D/gMsl6lWq5F1NwNlZaVQKVvh6+cPf/8AxMQl2Dxf1Wo1srPuoryslKEXNzcEBAUjKTkFTk7OJveLhWqxhJzse2hpaYGriwsSkw00mp3F0KiipQVubm7wDwxCYlIyXF3doNVqodEw3iuqNg0AxrOlqakOOVlZoHQ6JKf1hqurG1pbW5GRfgsV5eVQtakwYOBgRMfEoq62FoUFcgDM3ubu4WG2jgqFAvfuZqCmqgqtylZ4e/nAPzAAScmpov1oCE9IcMwkMxZ3UVZeguamJshk7ggKDkZKahqcXTrRw5YEQAM0RaNAng95fj4aGgzrS1JyCvwDAs0+nnEnHSqVEiX3DXSXn5uDtjYVAMDXxw9RMR0LidxQX4/7RYWoqqpEY0MD/Pz9ERQUjMjoGDg5OXWobGOUlhQjJyuL43G8vL2RnJKGgEDzfcDiTvpttLWp4O7ugYTEJLP3qZRKFBbIUVnJ8jVeCAgMQmRklOh6CADZWXfR0twMJydnpPXpC51Oh+yseyiQ5zM0L5MhLDwcKalpJjSm1WqRcScd5aWlUCgU8A8IQEBgILPfsVJuK6BpGgVyOeT5ecy67uDI7MEpKQjg0QcjUBSWl33vLpr1+0pyKpPbsbKiAncz7qCurhYuLi7wDwhAr8hom/qZ34+ZmXdQVVGBFkUL3GXuCAkNQ2JyCqRSqc3l2AuNRoN7mRkoKy1Bc0szZG4yBAQFISXFvrnZHjpgoVS2olAuR3lZGerqauHj44ug4GAkJqfYzMMwa0wm047mJri5yRAYHGx3O3Q6HfJyslFUVIjGxgY4O7vA3z8AKWm9BQa5nY3qqkrcu5uJ+ro6aLVaeHl7IyY2Dr0ioyzuKWWlJSgrKwUA9O03AA4ODmhubsad27dQXVUJmqbhHxCIkNAwREVHd7iehQVy1NQwZ7WBg5izWl5uDhoa6qHVaLn7qqoqce3qZe57QmKyaP/V19XhflEhystKoVQq4evnh8ioaET0iuxwXY1RU1ONe5kZXB/7+fsjNCwcMbFxNpdB0zTk+XnIy81Bc3MTPD299HxFvM300Z494Ob1a9BROnh7+3D1LSyQIyc7i6PT6TNnmzxHURQK5PmoqqxEZUU5pFIpAoKCEB7eC4G89AbWYE/fdYQeWDQ3N3PyBKWyFZ6eXoiI6IX4xCRBLl/+Z5qmcevGdegoHTw9vZCUlARXV2fk3C/C7ds30dTYBGcXQz/x97jEpGTu3fzoK0WFBSgrLUFlRQXcPdzh6+ePtN594MpbU9j7r1+7Cp1Oh+KiIu633Nxcjm8lAAweOoz7ra2tDbdv3gAAhIaFIzQszOx5RqvV4t7dTBTfv4/GxgZ4eHjA188fySkpcHf34OrBglsxeO0oKS5GVWUF3GTu8PPzR2rvPhy9se81fj//Oh/sNdZ7WkwZ/ijSebGwZMj4GD0HXUVebe/ZryuCRs8xcewJ49GTYLyndGd0d9qiWUUfTdlyXH6MhwG9dzYJ8agtxug0BTD/JQQACkIPNFpfuUcJMZfvR16ph4CTJ45xn1P1wiY+Dh3cj/RbtyCTuZtVALMMvEqlxLfffAUAmDp9hkABfPrkcVw4fw6A/QrgmupqbP3lZ9y8cV3094CAQCxauhzRIsIHtm6nTp7ARfb9PAVwWWkpvtPX+aVXX4drVSW+W/M156XKh6enJ5YsX4k+fftZrG9Odha2bN6EYp6Ql4/E5GQsWrIMRYWF+P5bJo/Pa2/+Cp6epv1vC7RaLc6cOokD+/agpcXUG1cqlWLy1OkYN2GizYoJ8flgQP8BA9G7T1+k376Fi+fPYcjQYRaFuFqtFhv1uTadnJywaOkym+ohWjd9NHsm5wDdKRY55WWl+EE/FiuefQ63b97Eru1bBUomANj2y88YMuwJLFi0BM7OzlAoWrD5pw24oTd84IMkSYwYORpz5i+wGOayqbERe3btwJVLF0VDXbm4uGL8xEkYPW681fHLzcnG9i2bUVJcLPp7QmISFixaYtPS1tTYiN07t+PalUui9fLw8MTMufMweMgwkafbj9qaGqxbu8bk+uaNP3Kf4+ITzCqAb16/ht07t6G2pkb099j4BMyeuwARvXoJrvOZLmteiwBwcN8eyPPzEBYWjoSkZJw/ewb79+xEa2uryb2Ojo54avRYTJo8FU7Opoq940cOIeNOOtxkMqT16Yu9u3fi+JFD3O8kSWL6rDlm21pXWyva1r79B2D2vAUWFcE0TePShXPYv2c3mpubTH4nSRLDho8QfT/bT83NzVj//bcAgElTp4sqgDUaDc6ePoljhw+K9pGDgwP6DRiI6bPmwMPDINDftGE9pyBjcfrkccF3exXA4mUeE3y3pAAukOdj/fdr0NhQb/Kbu7sH5i9airTe5vMUazQanDpxFMePHDKpBwA4ODpi9NgJGDdhssmct+egc/jAXsjz8xAaFo74xGRcPHcG+/ftglKs/x0dMeKpMZg4ZSqkUieoNVq9AI4AoaNw5dIl7Ny2BQDwp3f+hsw76fhl00ZB/dk25+XmYOOPPwBgcr3K3N0Fgj+CINDc1IT9e3fj0oVzJusswIQUHzFyNCZMmiJieMXwkBqNBieOH8HhgwdE+9HR0RHjJkzCxClTO0n5SePG9avYtW0bp
6QwRlx8AubMXyiqYNixdTMqKyoE144ePsh9HjRkaLsVwC0tLTi4bzfOnj4l2p/e3j6YM3+h3XPFAAOt5eZkY9svmwQGFHwkJCXj6cWW9/itP29EbW0N4hMSRXkHrVaLc2dO4aAVvmasCF+zf+9u5OfmMoqpsFB89/VXKCosMCnDw9MTK1c9j6TkFADAzRvXsfmnH9HUZLoO+vkHYNXzLyAyyrKS6/q1q9i13TJ9zFuwEOERkaLCxf17dyM/Lxdh4RGIjU/A1s2bcOHcGdGyBg4agqeXLLMYLrdNpcLB/Xtx6sQxaDQak99dXFwxbeYsPDVqjMV22QuKonDuzCns2bkDSqX4njhqzDhMnjpddE9kodVqcfb0KRzab54OJk6ZhrHjTemApmlcPH8Ou7ZvgUKhMHnW19cPs+bOtzgnNBoNjh89jCOHDqBNJb7GjJ0wCRMnT7WoSKdpGhfOncXe3TvQLEJfJEli+IiRmDlnrtky2oPi+0XYvmUzcvUGjsbw9fPHrDnzzPbB9WtXcWj/XgDAfz7+DBfOncH2LZuh1WpN7o3oFYkVzz6PoODgdtf37OmTuHThPACDAvjY4YO4k35bcN+9zAzcy8zgvr/569/B3T2B+65SqXBw3x6cOHZElH+NjIrG4mUrBOfU9qK0tATbt2xG9r27or8HBQdj0pRpGGSFV26or8faNV9Bnp9n8pujoyMmTJqCSVOnm1V8dWQP2LD+e7SpVOg3YCACg4Kx7rs1uHfX0L9R0TEmCuCMO+nYtW0LysvF00cNGfYEZs2Zb9FAoz191x56YNHU2IhdO7bi6mUz5xpPT8yeu0CgSDV4qpKCfgoOCcEnH/wHd/UGqwDTT9NmzQZoYMvPG1HH2+P4PFBebg5+/mm9aOotZxcXjB03AVNnzIREwmTVo2ka69Z+C5XRGnSMd04AGB6CrW9zcxO++eoLAMDM2XMQGhZm8i6tVosTx45i397daBVZIx0cHDBoyFDMnb8Anp4GJwGuHTnZ+OnH9SjXG4kYt2PMuAmYOn2m3qOXkfeRBGmRjxU7ixl/p2maoXFS743MygUegfDcOIqNufd3FcG+tXqwQ9MFqtohMKHVaU72bO+zttzTmV1kfVy6v5EBpXcE685tYNFV5vNjGNCd5khPph9GPq93ziIISNjw+d24zTRvY+yW7SAAigYI0CC5fAZsdBvhmaJTFcD8jiJpwDgjQFexsiKI/w0lsFKpxP69uzmlaEBgIAYMGtzu8h7URJDn5+HTjz8UHExIkoSjoyPa2toAMJa/n3z4H8yeO79DdTtz6iTu3c0UPRQCjLfwl599gldefxO9+4gL90+dPI7NG38S0JCjoyNomuYEJll37+K9f/4dw54YblO9rGHD+h9w+eIF7ruXtze8vLxRX1+HxoYGqNVq7N65HaWlJXjuhdV2lW2p755evBTZ2VloU6mwccN6/OkvfzMrADt0YD8qyssBADNmzbHLM9akTu1+0jbs3bUTdXXiCjUAuHzxApRKJZauWIkP3v+XiUCfBUVROH3yOHQ6LZ5eIi4MLym+jy8/+wRNjY2C607OzpywUalsxZ5dO5B++xZefOU1s1btZ06dxNbNmyzSXnbWPfz3vXcxYuRI8x0AxrPry08/RlOTsF6Ojo6cELmpqREbfliL4qIizF3wtMXyHgYoisL2LZtx5tQJwXVHqRQ6rZab13k52fjg/X9i0dIVGMqbg8ZhyGwNK0PRFDZtWIcrly4Krjs5OXNKKY1Gg+NHDiH7biZWv/qGWYGYVqPFZx/9F/l5uYLrAYFBgrlF07RoWx0cHASC2Vs3rkOel4dX3ngLQcGmSlmdTodNG9bh2pXLgutSqRM0GjXYML7nz55GgTwPIaEGAZI9IV1UKiW+X/M1F22BBb+PtFotrl6+hJyse3jm+dVWFS2PClcvX0RebraogBVghG7fr/kSz77wsqgSuLW1FWu/+dxEyOvg6Aitfm5pNRocPbQf+bnZeP6l10S9gQHblcEURWHzxvW4etk8jWo1Gpw8fhg52Xfxwsuvc0p4nZaCTtsmeNeencx6ZIzw8AibBBiVFeX48rOPTdJOSJ2coNbv6y3NzTi4bw+y7mbiuRdfFswZgiCgaFXgmy8/M5kr/DVKo9Hg4P69yMnJwquvv9Uhb2CKorB18yacOiE0PpBKpdDy1pfcnGz851//wOJlKzFs+JPtfp89ULa24uP/vMcJ4kmShI+vH1xdXVFdVQWlshX19XVYu+YrtLYq8ORTo9rxFmbszpw6gS0/b7S8z9y7i3+/+3c8NWo0d4+58JBMfU2vb9qwzma+ZpUZvqapqRHv/vUdUQUkwCgDPv/4Q/z2j39CYUEBNm/cYLb1NdVV+PjD/+KPb/9F4MXLgqGPn3HqpHX6eP/df2DJspUYaoE+Wpqb8d/3/onSEnFjLgC4dvUyamtr8OavfycanaapsRGff/KhSRnG/MXWzZtQWCA3y/vaC51Ohw3r1pruibz3ajQaHD18EPfuZuKV198yuydu/PEHQTkMHXihvr6eo4O9u3agrLQUzz7/ouDZXdu3mihI+KitrcHaNV+hqakRo8aMM/m9tVWBrz//1Ooac2j/XuRmZ+Hl194UXWPY/rh6+ZLgutTJCRq1YZ89e/ok8vNyRRU17cHVyxexYd33gr1KIpFAIpFArVYDAGprqrF2zVfIzrqHhYuXWvSqW7/2Wy7ShxjuFxXiw3+/i9+//Q58fHw7pQ3tAUVR+Oyj/3JRKMRQWCDHh/9+D6+88RaiY2Lb/a5bN65j3do1AuMKkiQhkUi4axXl5Vi39lvk5uRg4eKlogahJcX38a9/vIOW5maT3wCGzvbv3Q2lUinKb3fWHlBZUYH3/v4O6uuFe3N4hNBg8vzZM9ikN+oFADc3N/j6+UOpbEVtTQ0oisLlixeQn5eHP/75r6IGWJ3Vd7aitKQYn3/yocl5S3CuaWzE+u+/xf2iQsx/erHZsqoqKvDeP/5qwsNERUczUjYIz6pcGGmCwJ302/j680/Mlq3Sy2fKy8vwwkuv2NlK23lzpVKJLz/7BPd4CmwAcHZ25hTNWq0WF8+fw73MDKx+5XXExDJzhSAIpN+6ic8++chiOw7s3Y0Krh2sktZyeh1z3sLsb/x7tPqw2wQISPTGulQ7lX7tha3ewF1FYGxrPXqMIvgBKYU6u1u6Cn08aNAAR1zdvc3dUhHUw9FdvIGNZY49DgTB6fbY9BjdrZ2ixmkUmFxQ3RF6IzwdmJDWMDMmne4BLKiDlTCzjxJCS8PuOcg3rl1FdWWVyfW2NhWqq6tQXlbGMfgeHp5Y/fKrHQ4D19nsdl1tLT756APOWymtdx9MmzELvSIjQZIkmpoaceXyJezavg1qtRo7t2+1UDfryMy4A4AJWTl56jSER0RAp6NQWlKM3Tu2c+GKN6z7Hu/++wMTb4NbN2/g558MAsTxEydh5Kgx8A8IAE3TqKgox4mjR3Hm9EkoW1tx4thRQ/3auSiePnmCE5L26z8A8xcugo+vQfDSUF+PnzduwO1bN3HtymUMGjSYCzHb
UXj7+GDm7DnY8vMmVFdV4eD+fZg529RLsLysDIcP7gcAREZFYdSYsR16r8A8w4qncnvAKn9HjRmLIcOeQGBgEGpqa5CRno79e3ZBp9Mh/dZNvF9SjNqaGpAkiUlTp6Fvv/7w8/NHdXUVrl25jKOHGcHjuTOn8eRToxAWLvQ0aKivx+effMQJfGJi4zBtxixExcTAwcEBSmUrMtLTsWv7NjQ2NqCwQI6vP/8Ub/3GVNibfusmtvy8kfs+dvwEjBg5Gn7+/qBpGlWVlTh98gTOnDoBpbIVRw4dFDzPZ2KbGhvxxacfcV4qUdExmDJ9JsIjesHV1RW1NTXIzEjHvt27oFIpcerEMYRHRGDw0Cc6pf/9A/zx0mtvAgAunDvDhTB75rkXOeGqm5ubyXP79uziFKIODg6YOmMWBg4eAi8vb1AUhfKyUpw6cQyXLpwHRVHY+OMPcHVzRe8+/bg+aA/KSktRVspYv3t5+2D23PlITkmFk7Mz1Go1crLvYdf2baiqrEBJSTG+/foLvPnr34kKstraVMjPywVJkpgwaQrS+vZDYGAQdDqht82h/Xu5tjpKpZg4eSr69O0H/4BAtLWpUFZaisMH9iPrXiaamhqxpugaeQAAIABJREFUds3X+P2f/mLyzj07t3PKXycnZ8yYPRf9+g+Am0wGiqJQVFCAA/v3ICfrnqCd9vQXTdNY//13nPJXJnPHjNlzkdq7D1xcXKDRaHC/qBBnTh5H+u1baGxsxJqvPsevf/f/4OPri2dfWM3UpbCA80QaP3Eyou0Iq2gMsTLHTZyMmJhYWNstsrMYT5Wo6BhMmDQVoeERoGkaZaUlOLR/D+dpuGXTBiQkJgv2VYqi8MO3Bg8fL29vTJsxBzGxcfDy9kFTUyOKCuTYs2s7aqqrIM/Pw85tW7Bo6QquL9uD8rJSzkPDy9sbM2fPR1JKKpycGBrNzcnCnp0MjZaWFGPtN1/i9V/9Vk8vBAiCBEUZ3s0qfwcOGoyx4yfAz88fFAi46OcnfzszZvoVLS344pOP0KD3ng6P6IUZs+ciNi6eW/cyM+7g+JFDKCkuRoE8H99+/QXe+L/fcuueTqfDt19/wSlmvL19MGP2XMTFJ8DH2weNjQ0okOdj546tqK6qQn5uLrZs3oTlz6xqV/8BwJ5dOzjlr4ODA6bPmo3BQ4bBy9sHOp0OZaUlOHn8KC6ePweKovDT+u/h5uaG3ryIIcufeQ4ajQby/Dzs2bkdADBj9lxOAcGGWrQXmzas5wT/k6fNwPgJkwRelYUFcvy07nuUl5dh57atSEntDW8fHzvfQiP91k38sukn7sq4CRMxYtRo+PsHcPvMqRPHcfrkcSiVrdy+z8ISm8On7TNGfM08Eb5mM4+vGWiGr2GVjb6+fpi78GlERUVD4uCA0pJi7Nq+DUWFBaAoCt98+TkX9SU0NAyz5s5DRK9I0BSF+0VF2LZ1M6oqK9GmUmHH1i1Y/cprJu/as2snp/x1cHDA9JmzMXjIUHh5M/tPWVkpThwz0MeG9d/D1Yg++GhoqEdDQz2kUimmzZyNxOQU+Pr6oaa6Chnpt3Fw/15otVoUyPNx5tRJjBk3XvC8TqfDN19+yil/AwIDMWf+00hITIJUKkVbWxvuZWZgx7YtqK2pNlFOdgT89cbX1w+z5y1AUkoqnPV7YnbWPezc9gsqKypQUnwf33z5KX712z+a7E9nTp3glL99+w/A3AVPCxSLDfX1+OXnn5B+6yauX2XogO3PwgI5Thw7AgDwDwjAzNnzEB0TCxc9D3Pz+lUcPrgfWq0Wu7ZvRVqfvgLDRIqi8O1XwjVm5py5iI1LgLePD5oaG5k1ZvsWZo3Jy8W2LT9j6QrTdCg7t2/h+tfJ2Rmz585HvwGDINPvs4UFcuzbswvZ+vQRZaUlHR6Duxl3sG7tt9z3J4aPwOjxExAcHAKCIFBfV4crly/iwN7dnLe9o9QR8xYsMlsmq/x98qlRGDpsOIJDQtDc3AR5PtMPzU1NUCgU2Lp5E1582XSOtBdTps/EU6PGQKPRYM1XnwMAUnv3wUie13pIaCj3+eTxo5zyNzE5BVOmzUBQcDAkpATlZWU4c5qhK5VKiU0/rsMf//K3doWTzc/Lxdo1X3GGE4OGDMW4CZMQEhoGkiTRUF+PG9eu4vCh/Whpbsb5s6fh7OyMOfMXmpR1X3++9PL2xqw58xAVHQM3Nxkqystx/txpzlj7xLEjGDRkqEmEic7aA1jak8lkmDF7HuLiE+Dl7S1Yn0tLivHLJuasGxIaxkTg4inR1Wo1Dh/cj8MH9qGmugp7d+3AvIVCuupI39lDDyzv0djYiM8+/sBwromJxbQZsxDRqxdcXFxRW1uDzDvp2LNrB1RKJU4eP4rwXr0wdJi4oXYpr59mzpmPuPh4eHp5w9lZCg93N7S0KDlHBz7Xpmxt5aIZObu4YOr0mejTrz88PDyhaGnBvbsZ2LdnF9f+q1cuY9DgISAArH7lNVD69BN7d+8EAEyZNgNx8fGCtloDQTD5itd89QWn/HV398D8pxehb7/+cHV1hVqtRoFcjmNHD+Pm9WtoaGjAZ598iLf/8jf4+vmhtbUVP/24DgDg4uKCaTNnoV+/AfDw9GTSBmVmYO/unSbtYN9vTkhvThlsrh0SUqIXytKg2PskJGjQkHBeLuiuorzH6CSwSkexOdI1JdHm0ROUjuyU7BFt6QFt6GngGxA9HptHDwoA2UPGgqZpkDQBqjs3heaxRQ9TAWxKAMb+wF0DBsaz+41yYUEBCgtMQ96JYcTIUQgLj3jANbIfG378gVP+Tp0+AzNmzRHQjoeHJ8aNn4ikpGR88dknqKkWD7lnDEsHiieeHIFlK54RCAO8vLyQmJSMzz/5CHczM9DU1IRrV68IPHhVKhU2rGNCXkqlUqx6YTX68gSSBEEgODgES5avQGx8PH78Ya1JCLX2KDNZgWNMbCyeX/2yiRDDy9sbL7z0Cv72lz+hsqICly5eaJcC2NwMGDlqDK5evoQCuRxHDx/EwEGDBd4LNE1j44b10Gq1kEgkWLr8mY7n7SHMf+2sdWTW3HkYP3Ey9z00NAyhoWEIDAzEt19/CQBciOFnnntBEFItLDwCYeER8PD0wvYtmwEAF86dxYJFQmvyTT/9yCl/R48dh1lz5wsEoC4urhg0ZCgSk1Ow7rs1yM66h6LCAhw+sB9TZ8zk7mtTqbBpw3oAjDLwmVXPC4TKBEEgMCgICxYtRmx8PH5a9z3n/cFA2GtbNm/khCSjx43H7LkLBPPOz98fI0ePRVqfvvjPv/6BluZm7NuzG/0GDLI7V7EYXFxckazPqcsPCxefmASZTCb6TGGBHEcPHQAAePv44rkXXxaEeCZJEiGhYVi8bCXiExLx88YN0KjV2Lj+B0T/Lc5Eodwei+GY2DiseuElQR5bqVSK1LQ+iImNw/q13+Fu5h0UFRbg5PGjGDdhkmg5BEHg1Tf/D7Fx8byrBiViSXExp1iRubvjrV//XpDX2NnZBdExsXjptTewd9cOHDtyCFWVFbh04TyGj3iKu69Ano9T+hQ
Afn7+eP6lVwRewiRJIjI6GqtfeR379uzCiaOHTeppCy6eP8uF5usVGYVnnn9REErO0dERMbFxiImNw4VzZ7B18yao29pwJ/0WRo4ey4WHpXheTGHhEVzY1vZArMzw8AgkpaTy1l/ze/+AQUOweJlwLfPw8ERcfCLWrvkC9zIz0NLSjNs3rwtCF54/ewp5ufpc4zGxePHl1wXevR4enkjr0w+JSSlY89VnyMvNxrUrFzF67HhRD24+bNk3omPj8Myq1SY0mpLaG9Exsdjww3e4dzcD94sKcPrEMYwZP9HgcWHUF1Onz8SkqdMgIUk4OjiCJCVQqpQGIZwe/KcIgsC2LZs55e+w4SMwd8HTkEql3L7s7OyC/gMGoU/f/ti88UdcuXQRVVWVKJDnIzYuHgRB4Ozpk1xI05jYOLz82puMsJtm+sHDwxN9+w1Ackoavvz8Y+RkZ+HyxQsYP3EyQkJC7N4nCuT5OHyAmXM+vr548eVXeQJ4GhIJifCICCxbuQoJiUnYtGE91Go1Nqz7Hu/8819wc2PWLdarnR/GMSQ0DHHxpqEqbUVjQwNu3WTSY4yfOBnTZswyuScyKhqrX30df//Ln6BSKXH71g3O45EZXlOenPnNcF2lasNGvcBXKpXi2edXo08/4T4TFByMp5csRVxCAn78/jvhPmNlveC/67TeuCUmNhbPmeFrnn/pFfxdz9dctsDX+Pj64vdv/xkymYHmE5OS8X+/+wM+eP9fKCos4Ly4InpF4le/+Z3AWy3Nywtx8fH4+zt/Rl1dLe6k30ZTUxM8eHmtC+Rybk328fHFiy+/IlDQECSJkNBwLF3xrEX6MIZUKsVv//hnQUh9lr+IiYvHx/99HwBw6sQxEwXw8aOHuTNASmoannn+Rbi4GEJFOzk5oW//AYhLSMD3336DLCMvsM5AQlIyVr2wWtA+qVSKtN59EBsbhx+++waZGXdQWFCAE8eOCHgugDEEAIDomFiseuElUTp47sWX8c+/vs3QwaULHO/DKtoB4JXX3xLk5g4OCUFwyEyEhUfgl00/YejwJ+EgER55z5w6wRktxcTG4eXX34IzT6Hm4emJPv36IyklFV99Zlhjxo6fJBivgvx8nNQbe/r5B2D1K68LfidJEtExsXj1jV9hz87tgpDw7UWbSoX1P3wHgDFGWLriGZPww94+Ppg4eSqSU1Lx7VdfoLa2BiePHUVa774W07ksW7lKEDnF2cUF/gGBSE5Nwz/e+RNa9PmBa2trOhTphw92LvHXTV9fPy5HtjHOnz0NwNDffL40KiYGUTGMcjU3JxvDRzwFnU5n97lEq9Vi3XdrQFEUSJLEwsVLTbxqvby9MWb8BAwcPARffPoRSorvIzMjHeMnTRGN5BMcHIK3fvt7wXxh6+vnH4C9u3YAYGiTb2jQ0T3AGN7ePvjNH/4ETy8v0d/PnTkNnU4HV1c3vPL6W/DyFqYYkUqlmD5zNpoaG3Hh3BlcuXRBoADuSN9NmDwVvSKjAJjSQ0pab9H6AsAvmzZw55qx4ydizvyFgj3H3z8Ao8aMQ+8+/fD+P/+K5uZm7Nu1EwMGDjZ7rvH28cFv//A2PDw9Ded3ioJCoYSrqwtIQk9TvPNEZsYdNNQz/M+ceQvx5FOGaExOvr4YPmIkklPS8P67f0NKam8EBjHh1GmA43kpise3RkQg1Uy7LbGEZ06d5MJoR8fE4KVXX4e3t8EgQCqVIiExEQmJiTh14jh++nEd2lQq3Lp5HeMmTELGnXTU69sx/+lFGMEbP6lUihFPjURKahre+8dfkZLWGyHBIRyXQYMWsAP8eooJ7c0piNkUUCRBAKRBrqPT6UAA0IHg5jWff31YYUJ7ivKhO4VVtYTuKc0VR3enLZoJA9CjlMBA958jPQndxRP4fwE0CFD6HBAEuu98Z/kTiqIYL+Du0g4aIA0fAdCgaRiipfAiqwCGex8YDC/tuh3I9EVXVE9bho+vL2Lj4kX/goKCBYfd/Xt349//+icUIvm12o+OufoX37+PzDuMR25sXDxmzp5rtrzQsHAsWbbCvtqJlOXi6oq58xeICgIkEglmzZnHfTcOZXr+7Bm0tDDKvHETJgmUv8YYMnQYRo42n2/N1n6jKArTZszCmHHjMX3mbLMCDIlEwuUtLhPJ0yMGNlyVNZAkiSXLVkIikTDhZH9aLwgjePb0Kc6DYsKkKZ0W2s4Y7FrSGZuKh6enWcFI3/4DEMxTxIRH9DKbO+3Jp0ZyCkt5fp6gbveLinBX73EeFR2DuQueNhvazN3dHc++sJpTUp48fkyQQ/XC+bNcbryx4yaY9SgCmNzNxm3j91tFeRnncRsTF49Zc+ab7VMfH18sWrocAFBfV4tzZ06bfe+DBqucAYBFS5cjolcvQQhnPi0PHDwU48ZPBMCE4z19Qpj7FTCf99ccHKVSrFj1vECxxoeLiyuWPbOK85A8dfyYaA49ABgy9Akj5a8Qx44c4ubY8pWrBMpfft0JgsCU6TM54emh/XsFCpnjPIXuwsVLTRSMbJ+RJIkZs+a0KySzTqfjQnA6Ojpi5aoX4OXlLW6FTdN44smnsOLZ5/H//vJ3jBzdsUgB9oMG9IIpS2MvlTphxux5ZveJqdMNefLycnO4zzqdDieOMX3uJpNhxbMvmA3t7CiVYsnyZ+Ds7AyKorB/7y4A9q9z/BXcUSrFspXPWaTRJSsMNHr6pHka9fMPwKix46HR6KDV0WhTq9GmVsHNzRXOzk6iXhwEQaCysgI3r18FAASHhGLh4qVc+GCKogSHaAcHByxaugKLl63EX//xHjcntFotjh0x9OPK516AI+tlTehzz4ChJ0dHR6x45jk4O7uAoijs3rFN8B5bcXD/Pu7zkuUrjbyv+IdMJu/h+ElTADAhZE8eN11fOhMOjg5cOPuxEyaCpWMDLTN//gEBnKFfRXkZSJIASRL632mjP9M5cOHsGcM+M2GiQPlrjAEDB2G0kULSVlAUhakzZmH0uPGY1gl8zdhxEwTKXwAATcPRwQHjJkwUXJ46fYZoqFJnFxeOZ6MoCgXyfMHvB/VRBABgyfIVAvow3n8GDRmO8RNto48x4yeK5lMHmFzCafpUJLU11YK0FUxeR8b71dnZBStWPS9Q/vLh5iZjoms4tz88uhicnV3wzHMvmlVuu7i6Cup14tgRwXpDURSmTJ+JUWPHYeqMWRbpgI3iwc9DyaavkEql8PMPEH22d99++Pt7/8H0mbMFyi6dTodj+gguMpkMq154SaD85UMqlWI5b43Zs2u74Pcjhw9wnxcvXW52PEmSxKy58xEV3b4c4HycO3uaMy4cO36ixdyz4RG9sIwXGeHA3t1m742JjRMof/lwd3fHxElTATA0n5eTI3rfw0BTI6PoCwwMNKu8mzN/If7w9jt4atSYdhkuXr50gZtzw0eMtBhW38PTE6tfeR3Pr34Ff3rnH2bTuMycO9/sfBkzbgJHoznZwnzODo4OeHrJcv0eIG5YCDD7NhuJSCz3LIvps+aYVf4CwMDBQzB52gxMnTHTRPnLR9/+zDlYoVBwYZcJgs
CVSxcFfTdi5GgBb8P/8/TywkuvvoEXXnoVb//1n2b7zhLKy8pw6wajII+Ni8fseQvM8lA+vr5YvHwlACYa1NnTJ82WO2PWHKHyl6YBgoRWR6GpyRDKm+QpJ/lpdULDwgT7A8ubeHl7491/f4hlK59FeHgEaL1hG2eaaMS/8M865vgb/iWdTocD+j3L0dERL778mkD5a4xRY8Zi9cuv4t1//5czXOW3IyxM6EDAjp2vry/e++9HWPHMKoRHRJiMLXc/95zhP8uDiIVRZv+YvMKmimIJQTLKd4KAlqagpSlQFMX1zcMMRWmrLKVrg5HTdvtmgBF4U/q5xP51V3RnunoQ0fu6Anpaex6j66A70xY73yma6vZ7Isc7ULRlK7suCmY3F7p0GI/HA1cAc5UhAAkISECAoMEkje5SfdrlKmQVo8aMxe/++CfRv7//63188c13+M3v/8jl9cnLzcHnn34syMXzKHFDLyQGgFlz5lq9PyU1zaLVuhiMDwD9Bww0FRTy0CsykgtDWFJ8X/DbjevXADBCrQkTzR/AWUyZNl0Qnsta3cRAkiQGDByEBU8vRmJSstn7aJrmDvvVVaZhwe2Fcd1Cw8K4Q6E8Px9nz5wCwFil79rBCMMCg4Iweeq0Dr/bGLTRX2cgrXcfiwKhhCQDnQ0YJK78BRjBYJQ+LFpFuVDYcv2qIefq7Hniuav5cHNzw5TpMwAw+VQz9JbbAHDrBqOwdXFxwdgJE6yWNX7SJLi6CoXB7JDe1NMxAIwbP9GqV0TvPv24OcFv08OEUtmKu5n68O2JSUhMSraav3fshEnw8mIEV9c6od5PjRrDlWcObm5uGDWWUYo0NTWa5IBlMezJEWbL0Gg0uJPOhN8Nj+iFRCtesA4ODhgxchT3ztycbNA0DbVazXnlxsUnIF6/drKMmVi/TZs52+SaNRQWyFFXywj6RowcLQg5aG6N69Ovv0Wh4sMFpf8z9EdKahqXH1cMoWHhXJjSslJD7s1CeT7n+TFoyDCBF7QYvLx90LsPI0C9m5HO5eoVgy0M9YinRttEoyNHMwYiTU2NKJCL0+iQoU9AInEEQZDQ6ShotFpodRQUila0tbUJw17z5uLNa1e5ek6fNcfq+iKRSDD0ieGCvbJAno8GfX7CIUOf4PrRuA9oABRoeHp7cwZZd9Jvo02pEhtWs1C2tiJTH/o0MSmZi05gCRMmTeZo+Orli6Dp9udW5StxjQWjBEFAJnPH8BFPYcWzz8HT0xMkSXJ//PuUylZO6N4ePuDGDT2P4+KK8RaUDCwmTpoCV1fTUP3WYA9fU2YDXzNg0GD2AcOfHonJhvJZz1RzSOLdW87bz5WtrVz6ED598NTwRiVRGDfRsP9YCr385IiRZn8DgD59DYaGJcWGtSY/N4fn7TbBrFKJhUwmw9jx1nkHezBuwkSryho3NxnnudzU2CjItUuSJPoPHIT5C23hbxnFL58OwvXGDmq12iRvNx9ia5A8P4/LgTpk2HCLyjCA8QRklV0Z6be50ONqtZoz9ItPSESChXawmMkzNG0v2LQObm5uJl7VYoiLT0Df/gMAMGdB47ymLKzlDu/NMwoxPiM9TIRHMGOfnXXPbB7gjkYi4lJnODtjyvSZVu5maWSAWb7Hw9PT4vojlUo5D9DammqolEruNzc3GYaPeArLVq4SnXPs3qhQtKBOP7bV1eJrplQqxaAhQy22hU1XY85QlgU/ZUh1dRXX9mtXDOHQp4p4KxvDWt9ZA1+eMH7iZKtj36dvf857nR1nYzD9NEygyOBHjSP0oYkBQEKScHdzAWhKEG2NNXoR49/EIpXRtPmzrjmlsNi1/LxcLoLVmHHj4etrPV/3wMFDBOH3IyIiuc/HjhwyaywoISWmSlz9H6n3AAQhVP4aw1aFrVgfSvR/FGjoaNqgEBZ79gEKc7uzwLsnwtJ4dKex6u7KFNZYl1XM9wR09zHpaeCPhyUZ12M8DBCgQEBHd6911hh8JTBBdeF2UEyUFDHuic9XcV7NejzQHMCCSoDgakeCtXTsOsE6DIwnrecPu0a9OgIHBwfEJyTi//35HXzx6ce4k34bebk5OH/2TIdztPJhKdyyJeTlMoIgqVRqc3jEtN69kZ11z876GT4H8ELEmYOvrx/qamuhUCi4a1qtlhMyREfHwMVV3NOCD5nMHVHR0ci6e9fsPe3tO4ARPJWVlqBALselixe4vJT8CW4J1g5bxnWbMm06bly/iuqqKuzasR19+vTDls2boFQynqpLl6/slPDAtqAj/QZAcMgVA18Y6G3tXk9GUaTVaqFWqznFCBu+1NXVVZA7yxLS+vTF1s0/A2A80AcPHQatVsuNbVR0jFkvHz5cXFwRFRPLKTVYEASQqw9PS5IkVCoVZzVvCf7+AairrUVJ8X1otVqT/MSdDeOxzcvN5ei6d+++NpUhlUqRkJSMyxfPo6a6GvV1tVbH0hJSzIQiNEZiUjIO7tsDgBHCxCckmtzjb2EdKiosgEbvxevr52fT+PAFM0UFcqSkpuF+USF3na8wt4TYuHg4OTlbVEQag+8BKxaez1h4JrwGPFzDK2E0EkIvmTLuGz8b9glvX1/U1dWilbdPsKGfASbf8u2b1seOVXxSFIX7RYWIizelF6au1nkSNry1tXsTkpJx6ADjHSLPyxV9p39AoKAcmiag1epAkgQkEgnUaoMhmZOTVH8PzdGDRCJBsiDcNnhlmQrx+GDXTqZsJ24OCO4zaiK/HwsLC5GQmMR7D2Exik9ebo5hfRGNrmD6MCusv3j+HGqqq9FQX8+tLwQhfIIwqntnOqYoFC0ovn8febk5uHj+HOeFJeQDrL9Qq9WiSB9OODrGNh7HxdUV0bGxAmOljoLP11y2ga+RSCRMqGYz65vMTQZHR0doNBp4+/hYnBsePIMNJS8CR14eb//py+w/Bk8t9i5huVKpFEkpLH1Uoa6u1oTvkEqlVg1h+HmR+WsNX5GalGLb3pSS1hv7LXh/2gtL4Vj5SEpO5d6bn5tjkyEnSweFBXJcuXRRlA6GjxiJQwf2QafTYdsvm3DtyiUMGDQECUlJCAkJtTjWubwIP07OzgLDOHNwFqwxBUhITEJRYQG3zybZYDgCMMpYJ2dnTolsL5StrSi+X8SUlZBo01wFGGM+di3Nyc7CkGFPmNxjiTcBmNDBJEmCoii0tios3vsg8dSoMcjJzoJWq8UH77+LtD590advf8QlJFjl722BTqeDXD/HekVGCcLBtxf+ZrzU+fDhhdRWtCrg7GLda1+haEFxURHy8/NwibcH0GbWTF8/f4GC1B6lK0VRqKyoQFFRAW7fvMFFEwIM+7pOp+PWp8hO6jtrYOcze66xZT77BwSgtrYGxfeLRM81fv7i/SR2/tTqKNAAPDzckZycgpDQMJSVluD61SsoLCjAoCFDkZySiohekQLjOcA4jKV+T6EFN5jn68zwV1n3DHISvhGRPUhITERYWDhKSopx5fIlyOX5GDrsCaSk9kavyEjDeZ9T7IqHcRbUW++ZabgmnivY3PPiZ399SEOCAE3QnKKXonVMX
kKCAEmQ3LMPUrrXE8LE0nT3iXppDebGozuOT3cNo9wd6/wY3Q98BXB3pTn+/tZd2wABH0DxLne/9gh4D4oGbSzcedgwp+i14VFBaGs8RAWwcSUAgKQZ72qmRV2JMLqOYrozIJFIsGT5Srz9h99Co9Hg6JFDnaoABvRjaucYshboxoJmS2Bz5bQXvn7W81V56ZV/ylaD9XVzczMn5AkItK4cYBEUGGRRAQzYHiZFp9Pplfi5kOfnCRQ8Dwr8ukmlUixZtgIff/AfqJRKfPzhf1BZUQGAyTFtKaStLbBXDSRuPW3bxm/N24PvJe5l7V6eNb5KpeIO96wVvj006+PjywkG2fnRwqM9a8I5PgIDA6F3mhWA9VCkKArrv//W5vIAhgZbWpqtehl2BvhjW88LfWnX/As29H1dXV2HFMC2GI8AwjFqbGgw+d3JydlsnmMAnOcjANy6cd0mBTAfbNg2dpwB2xSaLPwDAuzy7OG/JzAwyOx9pvOVEazRNBvKlgDNWwUepuWg8Zrh42M+VB4L1kNYqTLsE2zeWwA4fGCvyTPWwApuxeplS3+wtGftAOHvz6PRRlMaBYSCaEN9SFAUDa1WA/6KrdFo4OrqAq1Wy+WM8w8IBEmSRl4zwrYY733sdz5N8UPv2orGxgbhfkLTAqGWcb/U8bzhAoNMadhcOHN+qNeGhnr4+fuLjpMl9sjew55C0YLbt25CnpcHeX6exTCf9kCwz9ixxgYGBiED7VcAs3xNvr18jb6bPT29rPadzN0d9XV1VvctvmedSmlQzrERDgDDGic0XhF/Pz/kfn1dnYliyppCGoCgzkqeR2A9b44EBFpXLAGW1+f2wOY9kUdPDSJ7IsDQQUb6beTnMXTAKmUswdPLC8uffQ4UjeuoAAAgAElEQVQb1/8AtVqNwgI5Z6gpk8mQkJiMtD590H/gYJP0G/z+sxQS2Rya9Osmf7+2tT/Ye1klrr1oaGjg1o0AO8aUzw/xeSo+rHkKSiQSyNzd0dTYKKDHh40+/fpj3IRJXLoMvjLSPyAACYnJGDh4SLtzr/PXw6AOnjtZ+NjghckalAIQeADzoVC0IP3WLcjz81Agt7wHiK0vfn7+dgnj8vNykXXvLuT5eSiUyzmjX3Pg911Hz+y2op53rvnhu2/selan06GludnEGMfX19/sM8b9RwNQqtogITVwc3PB6pdewScff4DamhrU1lTj0P69OLR/L6RSKWLi4pGcnIphTz5pUwQNg7ERw8QQMMcP8s5MvHUpOLj9Y7DqxdX44tOPUVNdjZrqauzbsxv79uzmDPdT09LwxPARcHUztIN/DufqyV7jNNxsbY1zBbP8EMDuq+I8lYiiGKyziYHfpAhASzHhYCR6JTB4PFe3F7Y/ILDd2lO6pTsrhfjoCe2geEYY3b0t3X39sGaM3d1gzuC8u7eru4ICAYLrf9ooKHH3A0HToPDo5onQhYW9aFtdjOfBI1EAsyAIAqReMEd3IZrgh/jpKfD19UVYeDgK5HJUVVaiVaEQMOyPAqyXmT35fiyF5bQFtniosha/fGUE3yPOnvq6d4Lls1qtxtnTp3D82BGBIJIPV1c3OEodRRVOnYmExCQMG/4kLp4/xyl/Pb28MLsTQtq1B+3NMWLNg5Vv9S2RWL5XQvIEi7x6qNvaAMBu63cPDw9Uq1RQ6b1D2vTlAIDM3XKYR2E54nOFX1570NHnbQX/cM5/p7sdawA/BG97vW1YmMuragw3NzfOQ0Yl8k4HR8v01PHxUev/G95tSeFsDHvXLLZfSZK0uqcIFYH6MHoEP1SfMGzfw2Tc+a9xdJSav1EPbo2g+fvEg5tbtkQ9ME5vYO5w6mqFRgHAwWiv5L+fyQfPqwsNKBStIEmSW/fc3d2h0+k4xYstazU73h3tR1VbG2gI88BZei9/rtiiUASY9vCVc+b60ZZybEFDfT2OHTmMc2dPm13LvLy90dbWBmVrq4B/sQUCHsdCmgxjeHi2j8dRq9U41w6+xngcJQ4S48dMQOrXGHv2fSH/Z6BHD09PCEXO5iFQ5qhMlTm2rTPiRiBsnUiStDkMt7OLC6RSqSBPfHshlUpt8k4ELO+JarUa586cxsljRwQ5jvmwxN8OHDQEoaHhOH70EG5cv8bNjZaWFly/dgXXr13B3l07MWf+Qi4EMtB5a7WQP7PnLNP+swF/rtpzJuLzQ+bWK0epDWckgm+89WhAkiRmz1uA+MREnD5xHPfuZnKW7dVVVaiuqsK5M6cQFR2DxctXIiQk1K7y+f3Tnpy0YrBlvvP5ITHDqONHbdgDVG1QKlvNjo81HpTFzRvXcfTQAbMhtqVSKTw8PFFTUy2oL3+t64wzsC2wJ2qNrc/b2k8AsxNQFKPMbGpqQXivCLz73vvYuWMnLl04zxkHsqlZ7mVmYP/e3Rg3YaIgvLg5npczNbLRPZM1HiBJEm528P/GCA+PwNt/+RsOHzqAi+fPcYp2tVqNzIw7yMy4gz27dmLCpMmYZiHUt1geXz7vbGklaU/EL/YZktbPKZoGBSaUIyniUdzZZ42eoXRgDSe7ezsY9Iwx6Rnt6FkS9u6J7hye1xra6xzUFdBd6mkTCAI0CNCMKphJC/Go69RR9JDF65EpgI0t77raMiTCn3Z7BAQGoUDOHOTq6us4YT17mNdorAuF+GGRLcEWht3VTYbGxkbu8GgLaszkNBKD2ILPCoTt3fjceIqNGn1eHVtQXW1728S8vRSKFnzy4Qe4X1TI/ebj64vYuHj06hWJ8IheCAwKgqenJ/bv3YO9u3dyz3fmJsIXos+dvwAZ6bfR3NwMAFi4aInNIeg6G4I1pItt8K6ublCr1aipqbGZ5rRaLScMZw/tfKVarR20Z25eyWSM94bM3R3PrHrB7lD83t7WvSM7Cn6oNYqiBPmMa6qrEBoWZlM5/BxoHRGCAIwXmJ+/eY8AFg31dZwg0k1mv5ENX4k3ZtwEm8NKsvD09ARBEAK6qautQUxsnE3P20NjgKGNFEWhvq7OapQFw1xg/0Rt2jg8Ggtbft1sB3/slix/Fh6e9hksWfOWsbaO1NXVws/PlEbFBMkcjVrJHWr8frHPWp0OlI4CKBqubq5obGxAbW0NXJ1d0NbWBpoQzmlLoGlaYLCwbOUq0X402dt5n4OCQwR7FkEIFWnG5fD7oKa6CmHh4RbryKKqqpL7zNaZa6eZ97UH94sK8dlHH6ClpYW7FhYejqjoGERGRSEkNAyBgUFwcXXFx//9N7Kz7tnN47jy+8Aunsz2e1koFC341ApfE6Dnaw7w+RqKeiR7PJ//q66uRkhoGMytDXw+hJ+v1p55Zk+dKIpCfb2pd7EYmhobO0X5CzAKgKamJpsUmfW8PZE/txWKFnz+8YdCOvDxRUxcHCJ6RSE8IgKBgUHw8PTEwX17sG/PLgCm/G1wSAiWrngWCxYtRVFhAQrk+cjLyUZeXi7aVCrU1tZg7Zqv8MxzL6L/wEEAAHeeMd2KZ58XKOttAevdzVe+19bU2BwJx545ZgzhecT2M1Fn8kNdBSmpvZGS2hstLS0okOejID8P
OdlZuF9UCJ1OhwJ5Pj798D948/9+J/CAtgZ+/9g7Vub2SYKwvv8ZP8t+v19UiM8/Fu4BoWHhiIqJQWSkcA/45IP27QF8bFi3FhfPn+O+u7i4Ii4+Hr2iohEeHoGgkBD4+PhCnp+HD//9LwAGPkPQd+3YH9oDd/25xt3dHc88v9ru5zsSHYgFa+QCEGhVquDkIMGSpUsxd/58yOVyFMjlyMvNRW5OFhobGqBSKbFvzy4oFArMf3qxIFqKIGIKDGseE3hCxMOWvVf/neVFKYpCbU0N/ANsixIhBjeZDHPmLcCsOfNQWlKM/Lw85OZkIzsrCw0N9VAqldi9cwcULQosXLzELP0b15cVCNOMS7NRaGih/M1cGGhzn5nyeVw8QUACgCIBHd8rGQRIfV0686xhNgR2twLTg11NttIR9JS29IR2GK9l3RndzRPYHG/QE+gK6H7jIYaesYcYwEQwAbq79pQEY+wHG/j5jsBcXt92l2dU10eiABYjaiYqS1fTthpC09B6F2Wa4e27ZViUel6YQ2+e5wp7WGtra4OipcWiYIAv8LQGa8LqwMBAlJeVoq621ua8ohUV5Xa9v711M4ZM5g5XVze0tipQVWl7H1SU215fPtj6bd+yhROORUT0wtwFCxGfkCjatrLSEu7zg9rECYKATOaOJctX4vzZMwgKDkY/nleFNbBqFf73jtbH+HNXWUcCgoLQ0FCPqsoKUDYKrqurKqHT6QAw8wNgBKaurq5obW0VCJStgfXQNkZgUBDKSkvQ0twM/8AATnBM0zR34O5KzAZJkgLFWEVFOfrY+Gx5WSkApj1+NuRfs4TKynKbFMCVvPXBlpxvxuCHoFUoFDblSxRDQIChHFvpRqvVotZOYac/7z2VFeU2hdkXzlV9IDjaeO4KbQUNIVcfAmNPCCLV2Qx+iFWaphGf0L6xswRLe1dVZYWoAphfJwCorDSsDbbQtNU6gQQNAjqaCf1cXlaGutpatCha4OvtjVaVEjqdDjqK4sJCSyQSsPlIOG8NvQcmf77TNI3EpGRB/c2NvbERlYHOAIBmOGrwmGqCoSb+nCsvL0Nf2LanlZUa1hdb57q9ezNFUfjum685wf/AwUMwZdoMQfhpYZ1KuPfYA2af0fM4dvB59vBkLPh8TbgRXyPoH5pG6cPga0SiEPF5lQA+fZSVWcypyK9fGW//6YgAXgz8cMPVlZU2KYDbM1aWUFlRbpMCuKqCv94Y+mHnNiEdzJ63wAJ/W8p9NkcHbFjSuPgETJg0BVqtFteuXMaWzRvRplJh44/rkJyaBmdnZ5M1JkG/xtgLfkoKW+eNVqvtkGLMy9uH8+S254zBPyN0djjwRw2ZTIa03n2Q1pvhDpubm3Hi2BEcObgfzU1N+OXnn/DGr35jc3lubm6QyWRoaWlBRXn5I+WJKYrC2jXCPWDy1M7fA1hcvnieU/7KZDLMe3ox+vYbYJK7FgDKSoTrM8AYuxj6rnNSFFhDYHAwSktL0NzcjICAQJvCbT8IGOiEgIYCVM0KuMtckZKSgsiICAwfNRqgaWTfvYvtv/yM8vIynDx+FH37D0BsXLxQ0Sss2cCPEgBN6wCQxqmCORjzNHw+r720TJIkwiN6ITyiF5dG7N7dTPz80waUlZXi2NHD6D9wEOITDGHXLYUYpXlxNgiC4BS27HP8atL8xpttAwmC4O3c+jL5eeMlPAGeTq/c1OprQwBwIEimHpT+XKIX8lKwz3PJHC/a/WDgoZk2dA35SkfQvcfDgB6h5IJ+PNC928GiJ9BWT6AroQG4UDbcXdrVc/YQwJDyDdCCaRtJPxqD7s4ACYZHIJiFq91RjDtbyWsPupQndlcmBJapJMjuqfxVKpUo1ud19PLyEih5+SHEqqwoCortyA0JWB7T+MREAAxzzrc0NgetVouLF87b9f721k3s3jj9oYbJu1dq5Qmg+P59s6GzbIFGo8GVyxcBMN4xv/rt75GQmCRab4qiUMB714NWgvbt1x8vv/YG5sxb8EDf053BKi00Gg0unj9nE72dPnnC8HxyCgCG9livkgJ5PsptEKiUl5chPy9X9LckfbkAcOPaNe4z623wsNdhwfvM0G1kVDScnJwBAJfOn7MpR2RtTQ0y76QDACJ6RQq8ZtqDC+fO2nTf5YuGNSo2zv78c0HBIfDSe1nfzbzT7tDVIaGhnBfwtauXbeqzq5cvcgYItiIxyaDkPH/ujH2V5MAwt6RI+EMhCCu/izxB2P8Mv172CDzieUqEWzeuWbizYzA3Ry9dsI1Gr16+wH1uD41aQjzPYOHs6VNoam4BRVGQyWRwcnLiBHuswpckmbxs/PC7ibx+vHn9quDgxj/MmfN+4X9nr9E0zRjw0axhAcl8pglERkXByZlZX86fOWPTXKmprsad27cAAL0iox6YR11uTjaq9Yql/gMGYtULq80K/quqKrnIHPZaLxAEgbh4/T6Tn29TbuHysjLk54rvM+agVqtx1QxfIxgrmoaOolAof/B8jbXDW2RUNEcfF86dtY0+aqqRkc7kRo6IjOx0D+A4npD9gg38MwCcP9ve9dlceadtuo/Pt7P7BUMHlwAwXr9v/vp3Fvnbwnbwtw4ODhj6xHBM1YdYVamUKC0pBmDMB12xqTwxhISGcbzF1UsXbaKNyxfP273P8uHo6IiYWGau5mTdM2vsxwdFUTh7+hQAJoR/fEJiu9//INGx/doAd3d3zJw9F6l6hXB+bo5AEST2Xv4fSZIcD15UWICiwoJ216Wj4O8B/QYMxLPPm98Dqnl7gL1pAFjw14k3/u93GDxkmKjyF2DOwyz4+3Rn9Z2t9JCUbIiUc70D87kzwabAaFEooVK1wVUmg8zZGdDpkJScjKUrV3H35uZkAzC0l7TaboN3Jh/s92Re5CD2XPkg9s+k5JT/z955x8lRnHn/Vz2zE3ZXK+0KJUAZBBglkgADQkjkaBzO5/MZAzZnny/Y7/kczne275wzmJxzMhlkgoTIKOdAViIbhVXYNKmr3j8693T39OSnW/39sGi341Ohq56qp56n8LV/+ob+99tvvWmVssRCfK/FfAyGxq9c53Gtbpx0MJ27jG0lqN86gBiUyVhZllGQZcgSIEum8nBNhT+oLEyPUIjKgxbljbRpE9UtWpjnASjbmfY1orJoLg0zAAuh7L8hQ4AzFP1oUK4QimgcQeyqHn34QQz09wNQJrPMTJx4kP77C88vcH1GJpPBc/Pnl/1utzI99jhjUDn3icdKhqZ78YXnah5OqtSEspkTT5oJQJlIefyxR0s++5GHHqhKtt27d+mTSQdPOgQpdQLSiSfnPm7ZR68RCoifb7UZXwmVNuT4T5+ge7X/9YnHkBkY8DSyfvD+e7qRcdjw4ZYJyk+feBIApe49+fhjJd8997FHXSe7jjr6GD0M45NzH7dMHhpiGSXX29ODm66/Bq9vWF/yvZVg3jdvYKB4n0RAmcg9/oQTAQA7d+7ASy88V/K5jz3yIPL5PADgpJNPqVrO9WvXuBrVNT784AOsWrEcgOLR5DeUrJ2ZJ88CAPTs3eurHXnhuWf
xxGMPo1cz/kBAkhiOO/7TAJTw1aUM2NlMBs88ObdsWYePGImDJinGiNfWr8Nbb77heb0sy7j7jtuwdPEim2c8t4QHddozU8FqJC7VbpsNc271yw2mOwv4a8mGDRuuT3y+/tp6rFy+1PN6zjnuv+cOLHrVn9HRKltxO7Jh3VrLhKwTH334AVavVOrogaPHquFsy6No0tE0ETn9qGOQVvcGnff0X7Fn715wWWBvTw8YY2hV9yFlYOAyx+7du3DzDdda6s2w4cP10OevbViPFcuWupaz5kV87123Y+ErLyGfz7sYgqH/cC7AuRbxgCEeb9Hb2B07tuOFBc+WzIOHH/yL3r5o3jCuOESq8ItZ5zl8yhTX6zjnuOeO2/W/K1EBTjDpOHMfe6Tk9Y8/+pCnUcWJcvSap+Y+YdkXtp56jZtWr3irm/qfHdvxooeerPHoQw/o9ePkWSXqRwUcOHoMxowdBwBYsWwJPiixOPO9d7dixbIlNZVh+dIllvDNTnz4wft6Ozh23Hi9vdljqgcHTZrkXQ/+6lwPZFnG6pUr8NTcxz1lMC8s0spk2PAR+NThyve0Yf06fVGCG8r3dRtefflFS1stSRKO+7Shm7z68ouez8lkMniyhLx+OEnVETjnePShv5S8/uUXn9cXrs447njf+zc3mmQyqY8LBwb6Xa/r7+/Di88vsJSb3YjLGENMXVjEOde9KJx+nDDrjQ/95b6S7c8br23AHbfejE9q7Glv6QMml+gD7rxd/7vS5lILed3Z1eW53cqbb7yO5aY2xZw/tco7c33IeOhvRx0zwxjXPPFYyTLo6enBDddepS8SrTWbN23Eg/ffgwF13JfN5dHT248YgK7BHZAYQyxutEsFtV3S6qlFb+0fsOSfpre4GUSFEBg5apQePWjtmtV4/bUNnlGyZFnGLTdej4WvvGxZnLJp40bcc+cdnvMz5sV7hUK+6Hyp78wSycv8Y7tP08fNP2qKoUXsc3qP/ZiuSwqBGBhikCAxSWkrJAmcATLn4Jwjz2VwF73Trz5iXtgWZMxRwsJAOWVImTCkAVDLA0GbXXeGct0qp92inI5KCEOagiy7hhACsgB4UeS/4KAtWhdc2f5MDz7iJzmiud6/QAMMwPrHBv+NOhUDjhOKIgkoqSlv4qsZ9Pb04PZbb8YLzxkTVqefebblmkMOO0zfw3Xp4kVYsbx49Wx/fz9uu/lG7N27pyp5zGU7aFAHTplzKgBlT8I//PZXFiOmmWfnPY2H/nJ/Ve8uJVcp78cpU6dh/ISJAIDVK1fg9ltvdhwUDQwM4IbrrsHrr22oSqYhQzoRi8UAKB45TgaCQqGAZ+c/g6f+Wr7hphY0w2PUDxTk6hg8WK/fe/fswR9/9xubsdWQb/3aNbjq8j/qk+nnX/g5y6D68ClT9YUba1avwl233+pY97KZDG6/5SasW7PaVa5kKoWzVY+YfC6H66+5Els2bzLJZci2c8cOXHvV5Vi3ZjWuu/rPWOvx3EoZPGSI/rt5EtuuFMw5/Qx9L+C5jz2CeU8/6ag4DAz047abbsDa1asAAAceOBpHHTOjJrLedN3V2LB+reO5je+8jWuvulyX6ezzzq/4PSedfIoeSnnxwlfw6EMPuE7AvPj8Ajz28INYMO8ZXHPl5ZBlWTd0nTz7VD3PHn/kQbz84guOz9ixYzuuvPwP2LOnsvb93PM/o9fXW2+8TjeC2+nv78cdt9yIFcuW4L6778C8p/4KwPherXXhfcu91rJWPTht550U+8Gm7Q4+/OC9onscEVajYTlq2jnnXagv/Hjw/nuwdIlz1IpsNoN777oNSxcvxIP33425jz3s+x1m7JLdcuM1eG2D86Tmpo1v44Zr/qyn+8xzzgNg1tO8tbRSAychBFpTrTj1DEXHGBgYwJ//9DtsffddFAoystkc+tVJ3FQyiW2ffIxrr7wC69asxjV//hPeevMNve059wIjH/9y791YsuhV3dhrnsjPZbO4+/ZbsXjhq7j/nrvw+CMPWdPk4SVsGIUFTj/jLH0/z0cffhBPzX3C0bg50N+Pm667BqtXKh7eo8eMwTHHHueZb0CxccIv5pDq77z9tuM1vb09uOv2W4u8b8plslnHWbUSd956i2O7k8lkcOtNN2Dt6vL7A7Nes3nTRmXiWy0IbeJXLhTw3Px5eLqCBSnl4Dles9Wd00436sfjjzyEZ56c69r/3HLDdVizaiUAxVB79Ixjay88gHMv+Iz++1VX/FH3IrPzxmsbcM2f/1QXGa7585/whouu+87bb+GqK/6o59O55xvyDjbrt5vd9dvnnp3nujDp/nvuws03XIsn5z7uuiist7cXixYqi58YYxg1yvCcPP/Cz+ptzH333OUaiSiTyeDO227GooWv4L6778SjD1sXZc057Qy9bjzy4F9cFwjs2L4Nl//+N9ize7fj+XKYOv0I/Vtdv24tbr7hWn2RrxnOOZ556q94+AFl/JRMpfR2nypan62FF7YagZRJlz/85pd48P578cB992Lzxo2Ober7772LN954HYASEtfPNkNmDp50CA6fMhUAsGnjO7j2qivQ29vjeO3aNatx8w3XYenihfjdr35hRGKoAZX2AZWOgIYOVd63q7vbsm+0mQ3r1uK2m663tIHm99Uy77T68OEH79v6b+P3VCqFcy+4EIASXeDaq/7suhhux47tuPqKP2Lt6lW45srLsUYdK9SKZUsW4w+/+SVeeuF53H/Pncjlcnr97OnvR382j/b2VrxkmpfZ/4ADdb1GSbOhC7//3rt6eo0fJ/3VmjcXfu7zul5+3dVXYtlSq7Fe++nv68P1116NxYsW4rZbbsLcx5UF7ksWL8Kvf/F/eOH5Bbjztlv0xTNmOOeY/8zT+t8HHOi96NVNB/JalMGgeESbjcPGfd5GX8tzfCz8iGkRAEznZKEZgrlFpyxn2joMhgcNY1wUDsJRJsGuW7ouHqLvJEyEoTyCXK/si7eCmg4zmhWN0wpGXBlqp6jNTLISxSOhucZfoIF7AJdbVb1WKkYorF65AjtdPGIHBgbw8ccf4+OPPrRMrJx97vl6mD+NVCqF004/A0+oXoM3XHs1Fk2bjoMOOgjtgwbh448+wqqVK1yNs9VwwYWfw9tvvoktWzZj65Yt+Pn//gTTjzgCY8eNR/ugQXjv3a146803dQNVurXVcZKjFphDELqd/9pl38Cvf/Ez9PX1YvHCV7F540Z8avJkjBs/AUINU7d+/Trs3LGjankTiQTGT5iIje+8jW3bPsHvf/MrzJ5zKoaPGIFdu3bh448+wrKlix3Dv5XrlVMLqv1S7737zrKuP3zyFEybfoTnNaXKtN6ce8GF2LxpEzZtfAcfffgBfvurn2PqtOkYO3YcOru68OGHH2Dr5s2WxQInnzKnaF9lxhgu/tpl+P2vf4G+vj4sXbwIWzZvwmGfmoyx48aBC453t2zF66+tN+peutXVg2LmrFOwZfMmLF+6BNu3bcOffvcbTD/iSIwdPwEjRoxAX38fNr79DlauWIa8agA4esZxJfO7EsaNMyISPPrwg9
j2yd+QSCaxZNFCfOGLX9K9S4cM6cRXLv4abrr+GsUT+onHsG7Nakw65FAcOGYMctks3nv3Xby2fh12794FAGhta8Mll31Tn2iulGQyhXw+h/7+ftx47dWYPGUqxo6fgJEjR2H79m14d+sWrF+7Rv/uTpp1Cg6fPLXy96VSuPSyb+LKP/0B2WwGLzz3LNavW4Op047AAQeORiKRwPbt27Bi2RJ9b8SWRAL/+NVL9T1WAWDw4CH44pcvwu033wBZlvHIg/djzaoVmHjQwThw9Bjs2bMH77+3FRvWrdW9Y9PpdNmesuPGT8C5F1yIJx59GNlsFnfedjOWLl6I8RMmYtQBByKfz+HdrVuwcvky9Pf1AVA8pE+zLUbqGNyBzq4u7OruxtLFr6Il0YJhw0Zgw7o1GDdhIk4/8xzbm4uNwNrCLE2H6Ogwnrlk0UK0tCQwbPhwrF+3FuMnTCxaEGWlfBXtwNFj8NkvfAkP3HcXstkM7r/7DixbvBAHH3IYRo3aHzLn+PD997Bi2RJ9QdXwESP1RRmVotXRgf5+3HLDNfjU5KkYO248RowYhR07tuG9rVuwYf1avY6eOHOW7gHnh3La0VmzT8Omje/g9Q3rsGP7Nlx5+e8weco0jB47FsOHj0Bfby/efvN1rFu7RtdPTjp5lh76mTGG0aPH4PNf/BLuv+cu1Vh+B5YsWohJhx6GUfvvD845Pnj/fSxfstiSj+eUmY9KugQGDxmCi7/2dVx/zVXgnOOJxx7BmtWrcMihh2HMuHHIZjJ4d+tWrF+3Brt3Ke1LW1s7Lvvnf6moffHbN40bN17f73PJooXI5/M49rjj0dbejm2ffIIPP/gAi159Bf39fZb7KtEBGGO45LJ/wm9/8TP09fVh8aJXsWmTquOMGw/OOd7dugWvrV+ve4mVq+MkEgmMGz8Bmza+g+3btuEPv/01TplzKoYPH4Fdu7rxt48/wrKlS8joNVobMKSzExdd+nXceK1SP+Y+/ijWqv3P6LFjkc1m8d5WpS3V+p+2tjZ8/Rv/XHX/48bhk6filDmn4YXnnkVvTw+u/NPvMXX6ERg3fgKGDRuOv/3tY2zdshkb1q2FEErI9XhLC3LZbNXvTqZSyOdy6O3txTVXXo7JU6dhnKlP3LplM9atWa2X2exTT8enTAU/ZkMAACAASURBVN6L9nrwp9/9GrNmn4phw0dgt1oPli/zrgdnnXseVixfilw2iwfuuwdLFy/CtCOO1A1mH33wAZYuXqSXx9EzjrUYVkaPGYu/+9KXce9ddyCbyeDuO27F4oWv4JDDPoX99z8AMpfxwXvvYemSRdirLo4aMXIkzr/gsxZ5Bg8Zgi9fdDFuvuFayLKMB++/F6tWLMdBkw7B6NFjsGfPbrz37lasW7NG18m89DM/KN/qN/C7X/0cvT09WL1yBd7dsgWfmjwFY8aOQzKVxHtbt+KtN9/QF9YxxvCPF13iuU98PTEMLt4REcZPmIjt2z7Bhx9+gNtuvgEHTzoUH7z/Lrq7u/Ev//7/IEkSzv/M53DT9degv78Pl//hNzh88lQcdvjhGDx4CHK5LDa+8zaWL1uq1/U5p51ZkcwXXfJ1/O6XP8POncqWIr/43x9jytRpGDN2PDoGd6B7506sWbXSsvjiyxddjEGmyDbVYu4Dli5eiEIhjxnHHY+2tnZs26b0AYtr1AcAwKRDDtWj3fzxt7/C6WeejXHjJ2Cgvx8ff/wRXt+wHm+qhnWv99Uq70rVB42Zs2Zj88aNWLZ0MbZv+wR//O2vMP3IozBu/ASMGDkS/X19eOftt7Fy+VJ9YdOMY4/H9CPc93SvhKNnHIsF85/BB++/h2VLFmPj22/j6BnHYtT++yPeksTOHduwcvky3bA7bPgI3VgOKDrJ4MFD0NU1FN3dO7Ho1ZfRkkhg+PARWLd2NSZOPAhnnXu+fq1+n+lvxhgmTDwIn/38F/DQA39BJpPBTddfi4WvvKzq/6ORz+WwefMmLF28GH19yv7SY8eN13WoGcceh3lPP4X333sXSxYvwjtvv4UZxx2PUfsfgGQige3bt2OZKQrE8BEjyhojlprns5832gpnT1TGrIZJ80JB88R50XuZ9Z6Ydr0aOUJWDb+y+m4A+tYlfiY+3N4fERGhoK2z1gwnAs03kNSCMHzvWhrCkBYgHGUSCrS+k4lAlold5qClwTAAMwYhrBOnlSCEsIR0rrYFb7YBxw1DgdQGPI1fwbBl82ZsMe2P5kUikcAFF34Op595luP5c867AFu2bNH3tFu/do3+u0YqlcLnv/j3uNsUZrASzMp3PB7Ht7/7Pdx43dV4/bXX0NfXi4WvvoKFDuFKhwwZgsu++S38/je/qur9pWVzr7TDhg/H9374X7j6yiuwY/t2fPLJ3/DJJ877bx162GE46pgZuOfOOyqW5ysXX4Lf/PLnGOjvx7tbt+C2W24quiYej+OMs85GX18fXny+dGjcesDU1bn6CqsK2pBX1f3J/DKofZCvwaZTe9aoNiUej+Nb//4d3H7LTVi/dg2ymQyWL12i731nhjGGM846x+LVY2a/YcPwne/9ANdffSV27tiBbZ98gm2ffOJ47cSDDsbxJ56Iu2+/zVW2r1x8Kdrb2/HCcwsghMDqVSuxWvVasjNr9hxc8NnP+0hx+QwbPhyTp0zDhvVrsat7J/76hBHi+p67bsd//fj/9BBwk6dOw2Xf/BfccevNyGQG8P577+qTKHaGDx+By771rxg2fHj1Mg4bhjlnnIl777wd+XweG9avwwaX0HGzZp+KC2uwN/boMWPx7e9+HzddfzV2dXdjx/bteH6Bcwj+rqFDccnXv4FR++9fVLenTT8CX730Mtx71x3I5bLYvGmjq2fESbOUsH2vuHgKezH71NMRj7fgiUcfQqFQwFtvvuEaDnrK1Gn4h4suQUtLi+W4JEmYOWs2Hn9EecaLzxmheN95+01MmXaExYPLjvKtxwBTSGLGGE46+RQ88ejDKBQKeMH8zLfexJSp09V8szwJ1fTpx59wEhKJBB647+6SeX7wpENx0SWX6XtcV8p+w4Zh9mln4r67b0chn8frG9bhdRdP4Jmz5uCCz37B13MraSuVBSvfwAP33YUVy5agkM9jzaoVjvsiM8Zw7vmfwTnnXwBZliHLMhhTJtc+feJMJBJJ3H/PXSXzcdIhh+KSr38DyVQKnHNLBAWLl5JNp9T6fCGAyVOn4xvf+jfcdsuNyAwM4L13t7qGuB0+YiS+9e/fxvDhI8rOH+u7vfM4lU7joku+hptvuA4AsHL5Mqx0iNLS0dGBz3/xS1gwf17JsLxeDBs2HN/9wY9w7ZVXYMeO7dj2yd+wzUXHmXjwwTjhxJm487Zb/L9ACHzlq5fgt7/6BQYGFL3mdhe95vQzz0Z/Xx9e9BHyvxz81ehiXXDK1Gn4p2/9G26/+UZkMqXqxwh881++jWFV1A8/fO7v/h7xeBzPznsanHOsWbVS9z42E4/H8Y9fvQQL5s8rGS7aD8OGDcdpZ56Fu2+/Ffl83nHMoHHaGWfhM58rbm++fNEl+P2vj
Xpwx63O9eA0tR7YvXy7uobi2//xPdxw7VXYu2eP536jEyYehH/4ysVFx0846WQkEgnce/edyGWz2LTxHdetHiYdehguveyb+n7QZqYfeRQuveybuOuOW0s+RwsZX62+PnTofvjPH/wI1139Z3zyt7+hu3unawjqVCqNr176dUytxSI+ZvxTjwmPU+acimVLlH3qVyxbihXLjK0UXn35JZw482QccdTR+PJFl+D+e+6ELMtYv24N1q9zrn+nnn6mHt6+XAYNGoTv/uBHuP6aK/Heu1uxd88eLHzlZcf9tNPpVlx06ddrvlAylU7jKxd/Dbfc6N0HDOrowOf/7kt47tnq+oAzzj4Xr21Yr6f3ob/c53jdkUcfg8M+NRn33Ok8zqhV3vmpDxoXXfp1tA8ahOcXzFfGNStX6NE67Mw+9fSa6Op2JEnCv37nu7jx2quwedNGdHfvxPxnnnK8dtCgDvzwBz/E4I42ZLIFxZEESqS5WXNOxSMP/kWJhDD/Gf2et954HVOnH4n9DzgANn9Yo29TJyNPP/NsxOMteOiB+1EoFPD6axtco5NNP+JIXHrZN3S9XJIk/L///B6uufIKbNq4ETt37sTTT/7VNR3f+Y/vue4V7YV9EtXtvHGOQbIZbQHVaMRMx0zbtzi1U6XaLu1snEkAA2TTuEJb7MAZU/YS1p4nbDfrcgV3otgN3X4e/KQ4LDIIJkFNhwQUKeZcW8DRDIFqCKUyqXQ8b76fQjqqhVKZlINbXxm0dADQ+0gBQAYDA4MkrIsIKafLKeqIuV6V8gJuNjXxANYVIqAuS3aoGoEBurLFYjG0t7dj+IiROHzyZJw482QMHjzE9XpJkvCv//4dPDvvGTy/YD66u7st5w86eBK+cvEl6BjUUbUBWEPLu9bWVnznu9/H0sWL8MLzzxVN8HZ2dmLGccfjrHPOI6EIjNr/APz0Z7/EgvnPYPHChdi2zWqEGz1mDGadMgcnnDQTL77wvH5cksr3Ahk5chT+639+gqf+OhfLly6x7M2TTKUwefIUnHfBhRg5ahQeesDYB6xZdVIfqDXl7SWwhYJqFKlUCt/8l3/D2tWr8PyCZ7Fp4zuW8kkkEjh8ylScefY5OHD0GM+yGzVqf/z3T3+G5xfMx9LFi7B9mzU82/4HHIiTTp6FE06aiQ3rnEMVa0iShM/93d/jqGOOxYJ5T+O11zbo3r6A0oZMmToNJ8+eg4MOnuTxpOq56NKv4c5bb7GEVz5w9Gicd8FniyYUJk+dhp/87JeY9/STWLVyOXr27rWcHzlqfxz36RMwc9bsskP+eXHU0TMwZsw4PDn3caxft6YoryYdehjOOPNsTDjo4Jq988DRo/HfP/05XnrhOSxfuhh/+9i6r9nQofvh0yeehONPnKmHenZi+pFHYdyEiZj/9JNYu2YV+np79XOSJOGggydhzuln4pBDD8OjVexdPnPWKTjs8MPx7DNPYcO6tei3eQZOOuRQnDBzlucE6azZp6KvtxcvPr9A9w7t6BiM0848G8OG+TXmM2itkBACs2afqhiSip55FvYbNqwu7eVRxxyLiQdPwoJ5T2PdmlXo6bHW03HjJ+CEk2bhyKNnWIyV1XDkUcdg9OgxePrJJ7Bh3Vrk89Y6evCkQ3HaGWdj/MSDSj+synBJ8Xgc//CVS3DMscfjxeeexTtvv2mJRtLSksC0I47EzFmnYOzY8chn80i1piCEUPb6EwyxeAxHHTMDBx9yCOY//STWrCrOx/ETJuLEmbNw1DEzdK8MuyJuTZb1uPlvIQQOnzIVP/35rzDvqb9i1Yrl2GtrX0btvz8+feJJOGXOaTVrX0rpkUcdMwPtgwbhybmP4523rGF+h3R24sijjsE555+P1tY2LJg/z5KuShi1//748c9+gefmz8PiRQux3abjHHDggZg5azZOnHky1q/17mc09HyG4kX5w//+MZ5+ci6WL1tapNccPnkKzjv/Mxg5ahQefrA6vUbYfvd6gp/nT5k6DT/9xa8x76m5WLmiuP8ZNWp/HH/iSTj5lDk17X/cYIzhM5/7AqYdcRTmP/Mk3nhtgyVUZzKVwtRp03HOeRdg2PARev2oBUcfcyzGjlX6xHXqAjeNlpYWHHb4ZJxx1jnQtq+wM2LkSHz/Rz/GM0/NxQqnenD4FJyj1oNHXOrBuPET8F//87946cXnsfCVl4rKY+h+wzD71NPUxSTOxoljjj0eB086FPOefhKrV60oesb4CRMxc9ZsHD3jWM+2+sijj8GEiQfh6SfnYs2qFei19bMHH3IoTj/jLBz6qcPx0APORrVyGTZ8BH70k5/hpReew6JXXy7SEToGD8aRRx+DM846Fx0dHfpxe4hV4/fmb58ydtx4/NM//yvuufM29KkRQ1paWnDyKXMw/UjDW/OEk2Zi3PgJeH7BPKxcvqwoXP34iQfh9DPPwtRp1Rlkh3R24vs/+jEWvfoKFr7yUtEig0EdHTj+0yfi5FPmoLOrq6p3uaH1AU/Nfbwo1LvWB5x9ntIHPPdsdX1AIpHAv//Hf+K5+fPw0gvPWzyLNX3xtDPOwuFTploWfTi9rxZ557c+aPJ9/otfwtEzjsX8Z57C6xvWW+pFLBbDlGnTccqc03CwGl2oHnR0dODb3/0+lixaiJdeWKBH6tFIpdM48aSTMfvU09E2eAiSyRQSCWVvYgaASTGcMuc09Pb04PkF8w29dfBgnHXOec4LW23ewIoRGJh96mmYPGUqnnpyLtasXqVH4dE47FOHY9bsOTjyqKMd0jEY//mDH2HRq6/g+QXP4sMPP7CcT6fTmDnrFJx2xpkYYtpupVK8jMF+J791z1/LsdLPd3uXRgxMn0fQFrtzCMiqVhETzHSv8X7m4M4YVAOEExWu+SdJZOCiRVjSATS/btVqnqPZ6agVgTegmghDmWh9qtv2DEHA7C1vh1qamFClzGRzkGX/4YK4ebc4czrrmD6KhlYNRTSBZngC14O+3l5s27YNsixj2PBhnsbjarGXa19vL3bt6kYun8fgjsHoGjq0KR+OJbSSR+O6a9cu7NmzGwwMnV1dlkmWB/9ynz7h9j8//RlGjxlTsTyyLKN750709OxFW3s7urqGNmSCsRKEKLWbZPNodjvS19eHnTu2Y2BgAIM6OrDffsMsE5PlyLd71y7s2bMHjDF0dnZikKnulUs+n0f3zp3o7+9DR8dgDOnstISuVORy8p6rHb09Peju3onOzi5faeGcY8f27dizZzdisRiGdHbqkxC1kO2KP/wWmzdtxIEHjsb3//sn+vFcLocd27eht7cXra1tGLrfUKTT7gbYUvgpcyEE9u7dgz27dyvl3TUUra2tZaeTc45d3d3o6dmLRCKJzq4upNPpSkV3RZZl7NrVjd6eHrS1tWFIZ1eRx68XuWwWH3/0EQZ1dGDwkCGQpHLbuuKQa7lsFn/7+CMM6hisPlNSrzFPYNS+r+GcY/euXejp2YNkMoWurqFIJJM1efZVl/8OmzdtxAEHjsZ//vDHAJQ053M57NixHb29PWhtbUPX0P3qUs5+yWWz6N7VjWxm
AB0dQzB4yBAwxiCrIfdiMQmMKRO4yUQLGGMYUA1K2oSaLMt63U2lvPPRaaKvlBeIPcyVEEr7snfPHsTiMXR1DcWQztq1L3b8tAPZbBbdO3cim82go2MwOru66q4f7dZ0HMYwpNOq47hin4xWsUsqyzK6u3eiZ28P2gfVXq/x25saOrz/vDT3P/FYHJ1dXXr9aBa5XA67urvR39+HtrZ2dA1tjJ5YKBTQvXMn+vp6kU63omvo0LK8wZRv26gHnRXUAyEEevbuVbbBYAz7DRtWdihezjl27erG3j1qGzN0KJIVtNWcc6Ve79mLRCqJrs4upD0WadWKXd3d2L1rFwpyAUOGDMF+w4bXbIFRo+Gc46MPP0AikcCQzi7P+qTpG7u6u5FubcV+Q/dDqk79XV9fL3Z1d0OWZXQN3a+m4Z790Mg+QAiBvXv2oLt7J1KpNLq6uhw94P1STd6VUx80lHHNDvT19aFj8GB0dnbVLSS/F/39fejeuROZTEbpJ4Z0GmGEoegayUQCyWQCuUwW/Vlj3+Csqrd2DB6s36ehXWMOj2z/27yoQ5tH2NuzF+3qPIJTPrrVp/6+PuzcuQMDAxkM3W8/dHZ21q198Tsusvxd4l6jnwe0vt7rPaVkkE3jDO1fSZIgwbSPsA+1gtrkcKWEJBmhKQ8gHGkJgycw0NyyqPXcZxjqlUZQdWSvRVKBRJ1/kRB8g7YdaunRDcDZbB4FWS7piWAOtSLsK9q0h9ZDUtv7qaKIR6uQgwLVsq1GruuuvhJrVq8CAPzxz1ehvb22EwVU8wyIjMDVQFk+JfSVfdKBBnoIctTXAFwviiYzCNeDRmHkQTAV9HrjZADWoFR/7J62mvFXO6aF1IvHlMnKlngMyUQCBVlWvF/0pDCU07M46bR+DMHWc/7vrQWUyq0sTOMDoPGacCW5FtSsjtj38N/M0NLLIiIiFKwLzGx7KwoBSWJIp5VtLAYyWXBu9SgpR3ex6C22g04thFNIQ0qU0ovcjb7OGqPT5X7eYS9DMKZGF1HPAbrRl5kNwfDOU2r5XT7h8dYEwpMOIBxpYQhHOoDGp6NeY8qwlUeQ0xMmb3lJjV4ChCM9AL10OC6vNiubeqOhKqf66ro6b9HuVpHtYfuoYZ1sdLCQ0yp/UlANp12pXHv27MEbr78GQAlLW2vjL+AsG5VQEPZvtVmTwk4Eqx2hhSRJEMJqbNWgWO/Mx2ig9a22o7aIAwA1uZuD97ca5Y8XTWnnLHqOqa5rsiiCKQZfNdSPLMuIt7SAc4GCLEOKCbCC4sWcTCbR1tqKzEBWCRErmYPrae2QMdnpFYLHfI+GfTLWHuXAjPJc67uMk1a5qoFy+++IvR9o9OsrvS9AWRwRTKIuPCIiQqNY37AtiuMCvb39aE8n0TGoDb29/ZBlZYc8wYrH0U7hj41nateY+kiLzmL1ErbPHVAbQ7mlVT+vKGHGOaH+6Zom87PMx9z1ZnN4SksZAuCqp7UEgAsODtUozIXh2SRJ+nyqLUi16lQTXE9HQ/cOR78XpnE4lXnBaghbeQCNSUs9x5JhKROneQPqaXIqV++5jeAgINS5JPcFa9QwT31ZSsbUITLQKQ/dpca+Ps6iBEIJ+cyZ4vUrlGU4RQ9jqF0hBTcGuLJqIZCiE4BquWsybdv2Ca664k9YvGhhyXuefOJxZLNZAMAxM2bUVTaKeQbQX7EXyVYpgmQbp30LtPPOMJ6bf8xQl78ZMH1Zvfknwg/Nq09KOekDEdMPTOEPY7EYBOdgUMLnCc6QlwGOGHL5Avbs7QGYQLo1hXg8BgjFW9gYfEoQwrpw0Tz48TtIVa5z+hY1w7CSCscBdR3Ca1Fvy/T2q9mClA2rdXFF7KNo4z23nwZI4PATERFBGTfnBqXPl9A3kENf7wDaWtMY1NoKpk6IKtH3mOO4AXCOImS/jDEGmdmvtepUQaBIN2K2c9p8JUyto0ujbG+v/eiM9nMSjElVSZIQA9P3DRYxJc/zgkPW9Ffb5hiK+hmc/LfDmISo/4moG0zxtOcB/kYi6BKkvs8NyvMF5SCYBM4kcCj2xyDCwcDBIKv/Uqtfnhss6RVJCHAiFmsN6h58gLbIz7QmgE72RVRAPp/Hn373G+zatQtvvP4adu/qxmlnnFW0T1kul8OjDz+Il158HgCQbm3FyafMrrt8llBShNAGXVQniil7W1Ft56jVMSdoy8hM/YPLFaTljwgaDWvnzJNokNQQeZU9igsBLgNxiSGTyyOXLyCdSiKdTiOfz0MuyMqiRMEhSQzCti+JlzdLUZQb23H3tte6xtP6LG6ZyK0VpPoBc341UQyNSnKERD5GkCC43WxgBY+IiPBAqK6rBc7R09uHdDqN9kHtGMhkkc/nAcHAIUFiokh/AazGS+OYc+QSJ49YJ+MnxbkFwF3HczvnZATmDnlobl+dPI/d8sjp/RIEhOqdLIQAuEABhl6s7T8ZM3keM9VtWwR0L8QgedKVIkxejhphSEvQ0wDUv0waNdYJQ3mE6fsAwtVuyVAWV1FOi+bdCyjGX930p8rMhREJhAKeBmDAWEUnEbReA7SNN4aXnCiamKQERe8ziuWaSCTwtX/6Jq744+9RKBTw2CMP47ln52P6EUdhxMiRKMgFfPzhh3jnnbfRvXMnACUdX734Ugwa1NEQGUlNFNtgatgjgJ7vHsX6ZoaqfKY1QiShmW+saNKZnow0KZ6IaZIgESXh6r/msDh+sU+qFbha0DEJmUwOXJaRTqfR2pZEf38/BBfKRFlMAue8qB+0e9LaPYSdKDV4cvKqqfd33Iz2zJz6ZrVTtXxr1GaEDwJDlgaxzyQ0ImKfxbKoDAz9AxnEYjEMam9FNptD/8AAJN0j2KqneBltzX8rJkdjMZswhYBmLvdTx68x2H5ecrrPwRtYO+juReysUzJzWGfGICD08NAAlO1Q1Ou052hzr/btTrzeTwtm+z1SvCJqh+IsLyzfbkRENTj1ldTsI/veXKESu4Oo+xhgs41yj/ohwPTzJY2vDcCnDMqeFgJGKBmhHCYBzUl+O9p0qOR5FQXc9j2JACYdcih++N8/wS03Xo+PP/4IPT09eOXlFx2vTaXTuPiSr+GIo45urJABgOJwIBjtCE1Mtn0ymCdEnELD0oFYxgUA+4RMhDP1bs+47e/i70uZyNInKat8nyzL4IrOjUwui2w+h0QigVQqhmwmi4LMEYvHlD3z1Ek0PwZeN29gpzQZl3jnrT6px2obxrlR/ZRT/gSZECQhAvuSsbcU1kgEERER4UPrf2XOsXvPXrS3pjBkUBt6+jMoFGRnL1dXIzBz6deFh77jrAfQG0dZKaW3WM6bhhNORlyzF7VxTP/N1Ujg9n6zkd18XUGNIMOEgJCFrjtSz2svNG9pSQpuGjSCVP9LQXMupHzCkg4g+GkJy/ehlYN5MXmz5Kjlc4JcJgCDDA5JgMSiD4vRV/vFRSx9YZ1Jbq1uNbNM/BmA9dApVrONuWo2ezqWpuej4ftiKJumnGp2pmmv9zHB1wyjMM0yBcaMHYv/+d+fYfXKFVi08BVs2bIFA/39ABS
ZDxw9GtOmH4nZp56Ktrb2pshIJe/sCo2lzgjVLGCOTdUkuTSo5JsTlGUDShtSmom9jfOrEHV2dWF470h0Dh1aJ7kAOKzwjijGufyaJQ0dhnR2YfiIkejsMupofeqR/Zn+6m01kjh56nKuHCtwAYkBA5kM8hJDa1sb4gUZ2VxOnWBT9VXT5J1Qo7Jo029O7YCmmHtN3lk8ZywrhQ2lzs0bpRbtYTVGWfNdrpI4fGuNpD4ev4o+HvRJFlo4lZR73oYr2yuJbeD1LNie5/XsyPgbERFWzBOGykIyCRCAxCT09Q0gmUhgUGsrMtkssrkcBGP6Nm0MAoIreo+y+A6W/lzRHTisfaGmTyjRibgQSgvj4Q0chEllL49g+3nlgHGdpOad5T5mbaeFQFFI6WJdT9FD7eMX7Yzi6avkP5eUldRCAAVwMMGUyW6myGPotSZ0j2W65QAERkzfhEGP9EpDEL5vANb2jRV9HYGjlvnezPmkoNQfJ7z6uXp+916L1St9p30OJYjlYYarkU/s/W5tKR7bmYtGc2jw6wjrFH3EmJZw/07qXV5MqDUuk81BlkuvdtAn4Yg6ZAP0J9GFudbUcg6hBpSbd9pEZKMaFapl29/Xh3yhgPb2dn0/FypQzTPAkI2ihJTzDaAkX3HnRpWiQTgRZcjJuBmhYOQHrXaVEo3x1jT7+0pFRxqNOc1xSYIQMgBgaFcn+vr6kM8XILhASzKBfD4PJknKfsFcgLkYZMsx2roubIIxyVUqVGCtKKfMBQDBlLlMuxSszGfVi1pJQCApoUWpw1EG1x4aOklERAQ9GGPgsgyJSegY1AohOHbu3otEIol8Pq/PP0iSsh2G03yElzevuV3Xoyg5XGd/XpCoxOEBsO4VbFAqyoykGtzL0NFMzyswASaM8WqcSU33HKqGMOkNQS0DM2FIg4bdsz7IBN0ArBGW+qX3hXVMTyPKKxzloSzkjzXwa9fsdlz5p2YwwfXFXY2mYgOw9v9mTgB6QaHhc8NiACZGJflWypuyllAuV4CufFTlAoJlBKZkNASolKvzyiXzv1GeeeNUz/ZlnNMfGYCdaFRd4cJ50V+zQyQJoewDxQDEYhIE50gkWpBItKBQKKBQKACavwUz7nGT3ex9Y27H7JNu3jqPbQK1AROnFXsEmyeCayVMldSiRu/jTWhZEOqe93GigoiIiHDGGm0E4IKjNZVEOplEf/8AMvksgBg0r1PzAv3iRWpW/cZqKC42ADvdb4fSOM8PnpFrHM7ZvXbsVxj3OHkPCV/vK/IWUrfe47a3aWWieQVTCIvplwCJWpKg1XkvwpKWIH0LblRbFpTmkMJSrwDUzcGr0eUV7DIR0HrfGOprPDXbJwSrQ9kLAcm0BUcjy6VsA7AdIQS4+mPeP4MKlBpBO4ZotPIMqC7f6m0Qpl2mkWyVQNkIDNDOO6CRqJboYgAAIABJREFU8vn/hu3GE2pQlU+bmNH2EN1XoLwwihrNaI8o1Uan0NCMMYBzMEn1kABHMplEPB5DPp8Hl2UI02DBbAjWME+KmhVyJ6OxtwdwsWxuXsFN8wYm0qfVWgq3Sdh9BUJdWYQrUSFFRESUh91rV6g6T5wxtLWlkZPz6O/PAmCQJAmyLEOJMGyduHQKSejmVarrLQ73hcEIbMYtQpSXTuXsFazdUxx6023cWUpv46oB2XyVXm5QwoObPSApl4NR36x/B5Ugy24nDGkJwjfgl0rSQHW+MozlUU6aqJVL0MtDCAFJXfzEWG3yVnsKb/gYTTFqxyQJNUpKSao2AAPGRFqtXaNrBbWPzo4QBDPNRD3zL0yrnOxEspWPfYBDCap5ptEY+cKjjGpoe35SxMlIFUaiMM/+aUR9MNZXmqEbuo1zjng8rk54KvvmSTGgRYqhUCigtbUVLS1xZAeykLkM2Dx8S+E0EVhuWGiUWOXZ0AVyTWpTGvFW88RrULHWBZrfXEQ1BLt+RkRENBa7kVaPfqIaeiWJIZlIoC2ZwO6eXuQKMuLxOMAF7FNMTgvd7OfVt1qOORlWwmYEBtx1KD+ho52ucLutXA9ky3kYRmHzPYwxxNQFAADNsjDqHk35KiFKBz2cFq0EkUoc8ygShrLQqMRjMyqX+iAJ6AbgatLCwVxH2+bj9cktY9YtXg9PYwdqYgDW4ILroU44aFUqqh8eYISHoTr53Yy881t3aJdrJFslOA2iqEzpUs83v9cUf18c/tqf6hTRUgaUZmAfQAPND2vrtFo8DFjT4ZTHNOoEVWpfD7j6f9u3b2qDqXynGr49JxgA3UOCIRaTkEokIMUk5LJZCMEtHsFaJBvFm6LYc8NJDsA9JFTxJKnVM1XzgDC/g/l4r5csHhcYv6J5X1lta682YFLy39Cja5m6YiO+P/wNF4l9WhFVUU7diwo+IiKiMvRxChgEBFRFAgxAPMbQ1taKbCaHgVwOXABgEiTVWKhe6tObV//N0le5GVbqHd2k0VRiCBY2XUs9CKG3+cpRDgYJ9m1GSjzP7gGuPk8Ik4cwF7rXt/avpPyibYRieV4zysiaDsBev4JO0Os9EI40AME2AlsjSLmnwW+0AioEtTzsePV3QSgHjaCVh2O/JQQkWKOtlX6O+q8WiQKA29isMQZg4/mMSWp/XUcHzFoagM3eSpxgfaL8QSqy0TQAA83NOz91kmrZUpULIC4b6MpHVS6N2slX20bc7s1KSemwG1opyQYE3xPYWXa6/R0l6lvuigFYNrmIUKv7bui6psmD38kzxTKIBkdLPI5UKgVZVvYHFsLqXVNNfpeaAC3+2z2/a7IIroltRqPerHn8NrPtDsgnE9EUosoRERHRGLTFZu3pFDiAvv4ByLK6gQeTwEx7A2uL1+w6hLOHb3H/ajeuhNEbWMOP96/bMac7ZQBMOPcOpQzMThPgHJoTjvO9jDHEJQnGAkRFMDrlE+ww0GbCkg4gHGkJshHYTql0BGmeKAxl4scTOCqTxqF5A3tFORHKCrgmhHkuD4kxbXl7fZ5f6wcyxgBGc8hbahVNM7F6iNCjmXmnKbzmn6BgXxFESXbS3wNotiEA/Q6yFvLVq5pqdY5aHpqVOGqyAcX5Rq0tccLbe5leHlOk/mUsgfoWFLVE5hLyskBvfx9yuTzS6TRiMQlCcEhgyoSYQ9001+VShkavb9MpIkI9vmWmPLymz6RKPdtu1XGm5E9ERERERESjMY8NGGOQBQDG0NPXD1mWMbijHa2JmHKtFIMsDK+ScqJGqW+znvMhH/Vxil9K6RZO+odeLg7XK95K2l9G+Ee/7yo6BiUajZdemisUkCvkUeAyCpwXOeoEYVwZBKI8pEVUGjQJw3fiNdaP2tPGw+E99SGrEYp5owSqAqX+1HE+vtYewPYJaoqewADthscqGy0vKcr5Rpkg5BtVGfVOlOBMK9U8A8qRzbz4xGi/620I5VzpgqkZXKmXKWX5NAwZafVfQaDe5WtXfJsVBs4PXt4MXufN95s9exmTwLkMAJAkJVRQsiWOZDKFQi6PXC4HFjMm0extlNn4aw4B7eVB4/dYOd7ARekWogEhivzROM9ff9
cRrdoRoSSqbBEREe7UW9cyj+QE54jHJaSTCQjGsKenH4wBggvXrSvsuHn4mpPh5GFXyjM46JRvPLces5/RIpl4Pctp8aH5HTKcx4dOCw+150iqfixpoaIJQESMqglTvQ96WsLkBQw4pyUIc0N2wlImUTrowIS2jVexmZfb9tf17nVpwMAgwXmLiKqeW0sDsB0hBGTia28oN5iKaPSqJuU8CwJU84+UXLZWWYjifYGpQCrfHHBfoWb83ux9gCjix9uv0ZjzjHL+GWHKYs0WJXDUslw19dc99Lpku5IeXsbVcsLyafdpBlshhDL5pXpftKZSiMVjyObyljB7Zu9fL+8KpxDSfgy65YSFdnm5/2vrDB1JIiIaBQ3dICIiojFQGQ84oo6VFd2bQxtMp5JxJGIx7O0bQF4Ads3cbazjrquYt9ZwOl/67zBRKmyz1zmns8Y9pQ09+t+MgUMoCxcdZrad5OAQYGrcG0mSENNDRDcrGhatMXe1BCUdpRYCByUdpQiTIbjkwuAAEYYyCUoa/Cz6D0pa3GBC7Uns8ynu3SppYmBgrMaR4uppAAaUPSkAe4ATOlBuMBXRaHpSlVJog9541BPadY6IbB6DF2H6m0o9czLM0ZVN8XxrtgHYLJMfo0mjoWgA1vAOr9x8iuWi1X9Rwl73G2sApkM16XYKje7ktau+CUakA9XbgAnd8NqeToMxhmwma3FFMO817CS724pst4k0s3xO10iS/R6urgNFU42+NFuciIhmQbM9jYiIqA1+F3bRQNFvNN3GiFSieHu2t6WRy+bQOzAAxmJQtgAREOCQhUCcSWCKBRlCcDDVU9i+p56RBZruor7Hh+cv7fyrjkoMwZpO52wILv0suwFYCKGUoRru0vNe04vsxn6JMSOsNG/UWNiYoQ9DNQlKXfc7nxaU9HgRNiMw1XkgN5ykDVuZOEFlznqf+NZNht4wGIABtU+GsCgF1ZRR3Q3AZqMNJzp1RL3xVOSjN4FOPd8oQz3vKMtH1RuYcp4BZuMIrX1u7cZMirJRhXpI6CgUdDH1Ki99kZ11ZUdd3lVP/Hr1au2E3SPYrR1xm8A1DLHKVJkEgZaWFiQSCSWKTaGgGH9NnsNgzvu6VbKytlSoRItHMIFvvfkSRERQIHhta0REhAGlsUa90Q3BDGhNpwEI9PVnIHMZjEmQuQAHV6KiCCAmSeBcgEnFepaGY/QSh3N+t8QIE+VEp3E653xWyTP7YsRSYaD9jDm8rmGMKQsDHHRuP8+rtqzDUFXCUt9Dkw7t35CkhyrljBf3BSNwkAhDGsyEIT0SRM3arrobgDWEMPYDFlxbrVaXV1UM7Yl07TdimaZCOe8oQzXfqMqlwQnLRznvrLJZPdCaCWUPaiAYZUrfY1lXW5opStOodx0yr/SnWg/cKCdvSqWtPG9gxycAEGDgSKdSaInHUMjLKHBZ99QtCA6piuL0N5lqn2wTTfly6LZ8ERGNIhjtaERE2AmKTkMJi+cuGGS5gNZkEvFkHJlMBtlsASwWR17O6x5lkiRBEqV1FScjMKC2mLaFcvuiIdhMud7BwuO88afVpYkLoTvb2D19WQkZPGUxRapRvIMlSChegAmUq2+XJkxVJCz1PQzpCJPBkSrljh/DZpiP0kGLoKeDaY4KLpHlyntWgwzAZjjnhrcKsbIIwkQ/RW8qyvlGHap5R1UuDQG6MlKVCzAbDI1jVDpFqkZMyuWpIYS69xNo5R1gzj8Gcp1+A2hE/aG7i687jmHsUJ2HgZMB2O2ZxYtftNCJyr1xSbmvpaUFqVQK/f39+gRpJaG3vLyXS0+OippMWHjdX2oSMiJi32Tf67MiIpoBNd01TBjhoCWAczAJaGtrRSEvo6e3TxlTSwxC2xdWNRg7ae3Oukvx2I05REsppfeEGb9GYPtVruGjXRZ1a/8qu0D7udcdxyhdAoipIaIlNUx4vQzAyjOqfgQZwlLnw5COyAhcXyoZP4bJCByGNGgELS1Ozk1BS4MbDNA2BKs4TU0xAGsTcxxo2rjaSzGhPNkvCFrNzR8Z5byjDOV8Iy0biMtHVDazQY6CB7BGpYaVRkFVLg2qIaH3VQNwvcqCm0LEUfl2y8EtRJ3ZuFrJc7z23nE671Y+Zn2GSQDU0IitqTQkSUIhn4fMrTLr17uE5vPyWLZ46DgMWIz5Vfe8qef2LRER+w7Ba08jIqgQRH0k9JgD8EAdJzCACQYGgdZUC1paYtjbm0W2kIeQlDDQmj4jMQbJFjHKrucYx2yL3NRr9vWQ0Ha8Ftt5jxs4AKmkQdh8WgspLVjxoks3fdX72cX6OmMMMW3BQA28kxzerr8z6IQhDUBI0qFGO4ionmrHitZIFSGpXwh+Opz6/KAT5LRo30kM1ZVLUwzAGtpktRACQqJVGBQn0e0oMtLruIKQd1ShnHdUZdMGN1Shmm+AuzcwBaMw5XwD1EgWRBUjc9/qZRRrhtzF5Roeo3B96qwWL8XIJz2CSgBp5nddrkeCYQhWJjYlKJMFba1pCFFANpcHwACmeM2Al36+33OOhmBbqTPBIZhUdF0tobzdQkSEf2xWEAvh6H8iImoBNX02ov7EJAntrUkM9GfQl8lCxOOKJ7BpHKEZ+MyU8rAx/ymVvHbfrncyd5+D9ecBbMC5u95m8RB2eJ1fQ7AbTPUMZurCAT+l6mSMdl/s6OOBxCkViSdI30KQZPXCKVpBhDf1Hh2aDcFMKH87tVlBIEzfSRiwLPQPcJoYE8p2DGW2XiQMwEDxB02hQKgbIAz5IiNwWKCcb9RloytdAPKOaDhoypj7Lyp5pmEe4FOTMWwG4HpHwBCWAG7BzSeg+fXRz7udwjRzwQDBITHFIBpjAjFJwqCODmSzWRQKeWVyk0ngpkk8v2k11yGvSVG/6yRrmbfU+9aIiOoJdrsaEVEJVHTCiOYjhIDEBAa1tUKWBXb39ACSBJlzxGIxfXuZuBRzDenstiCWMes55nCv29/7Ehbv3BLnS94vnI/bj8kltDuvd5Y6xxhDXF2gqJe9zwXmTtdY9eTyF3RSJejyA+FIA1C8SCXCmUaOCXUjsDmQXAAJyzcChCctVByeqoODAYix8myBTTUAA1YlxE+4vEZD3QgBaIoejfwyE4S8owrlvKMsG0DXY4l6vlE0BHuFim02mtJAuVz9eAI3C2u+BdMIXB+Dr/ffQcQedg5o/jfsFRa66Jga2ZCp5817nqWTCSTicci8gIJcbPytVBZvb2D/X0st8jkyAkeEj+D1NxERQPP7zojwoffwQqA1mURLPIae/gHkZFkfUysendDD/CqXu++z5+YN7GRkKRUiel+j1BYcpY3B/ryGlWhCAsp/5b3TPv71MjSbDcBxJjkukiw1NrDWNXpj2moIS1rCko7IEGxAYexn9gQOMmFIg0ZY0hL8dKj9JoQSccNvhLtmG4A1lAkmoUTQI1oWtCf6AaoZRznfKEM936jKV2rg1Gyo5htgGIHNRg4qNNt70Auzxy0F2ezGaap1rlguWnXOjboYfrVoKKBXvyuBUp1zaje09sSrnfNalKidkxgDYwLpd
AoxKYZ8Pu84qVUpJUMsljERVm29orq4KiLCH8FvVyPCQxj6+YhwIABwCMSgRDuJxWJoa00hk82jfyADLjggSfrYwhzat9TCNcP713hfqXu9ju1LaBqX3dBaKlS08o91UbfbtXY4io3Cfu/1cw0TQllEIEmq/uy8YKDUM7XLw1JHonTQISwGx1rgGI1A/beROcQAIERhusNSv6J0UEHZpoz5NAKTMQBD9TAQQtmTgspEuhlKE5p2FNnoelFRzjvqUM07qnJpUJ6sppx3Qij721I0AFOGmoHaazV2s3AL62VAq87ZqVf+mZ/LTceNvJKgrdUPCs2ua2acPBe0437CQdvvtS+wYIwBjCPOJKRSKcRiMWSzWcv1lWKWj4MhZhO3HAOw/XnlounoERHBpCh4qfrDHa6NiKgcCjpgREQ56MZDiYFxZTKxNZ0EpBj29vShIDiYxCDUaChmb2B9QZzDuNHQmWwGX0QG4HLws7jdyyPXrrqVimJlPu4WKtpvdJ9Si6RjUPYNLm/egc54uxZE6aCD0wKVfRWnL58DAAOkBg8H7VsIBJmw1K2gp8O6tUCQ06LMETIBxHz0o7oBOJvNoyDLTU28XZEQUEPvEYPyBJghm9ScJTou1NIjZl+Ecp5Rl42udLTzzryXJYlGRIVyntk9bqkpE9o+XhShbgiubb3j6v/ppbNWuIVvB2h9F5WWq1caEnHFEMxlGbLpm9MiLDAmAeBlL3T08gZWJlONc6VSVe0Kd+p9a8S+Dp02JiKYUOqnIiKagRACqZY4WhJxDGQyyObyEGAQTF2UyIVlslHfq9Hs7cvsHimiyPBrPhsZgv3hJ0y0l35rOI5Yn2f+W18sCcUb2OyRbLydaQd9y+yFJCkhorXdpv0u2AxLFQlLXQ9DOsLuCczsi0JA0+5iJkxGYCA89ctvO02dIMtuwBErtVUHVQOwhqwtNSQGfSNE5A0cRqjmHVW5AKvXEsWOiXTe6bLRzLOwGZXqhX0BDjX5zCiy0TSMVpNvRXcKAUGoztYa+nWs+meY9zUzP1cIgRgDUqkkWlpakMvlwLkSVhEAZFnWw09XsljE0RAsAMZU3c/Ho2qxwj3yCI6gQXjb0YjqoKQXRkQECU1H4XIB8Xgc6XQa2eyAMl8oBLh6ngnDUFJyywrGYNaGzZ43RfEZSjwrQsFpMZ7FeFvSEGzcAxRvlWJH8/4T3GwSdpGrhH5baqGo8iNBkpjqde4cQSoKB02TsKQDCOe+wEEdvYXNQztKBy2Cnw7F8cBsBLZTZAAGmpdwNwVFCz1CzXhDdeKLugEYoJt31KGcb9Rl0yarKbUhGlTzzh6VQfFcaz7UDcAA3TIFSk8INBPKnsDV5Jnd95pqW1RLvMKQU1tsWMkzvMIGaSEPJQik02m0tLQgm8kox1WPGVlddFlpXhSFT7St1/Hz2Ko9gbV/ibYnEfsC4W5HI0oT9r40IqIZ6GGCOYcUk5BOJSFJEnp7+5DnHNymxzpNOBb/7XEODnqNjehbt2LRvMxjY9vCROOSUuGcyzfWAsq2W27eg/axutez9N+FpF+vLRBQ9gw2Fl86zQGEqXqEoa6HIQ2AqX41WY5aEvRRW1iM8mH5RoBwpCX4aVDn2QQgMUntE61fOykDsBtCCMgQZCdMqU58WcJBE4RqvgUBynlHVbYghKwknXfqwhJKbTDV/NKgLJ+TEZ0KlI3AGp4r6t2uJfTtNJpmh0evVz2366Vmr15j8lRGoiWOVCoFBiCfzyuB8xizhdovH+/JVn95XavV1EHoYyPCyL7broYRSjpmRMS+iq7HSEzx8hWAEBzJZAKJRAsGBjLIqLqMGXP4Xvtxt78tRmGYjXku3itRG1ES88J3N73b3RgMWEcypbdv42rERu1eN3tyqefYDcD247pBWFJGh5JDiHHtujAQhnSEIQ0aQQ0LHcaxWeQJTJewpCXo6TAitNi23hAKyOULKBTk4hsJJFzr9GWIIg8Hp2ubIbOfycXmyab91vyydIKiASIoUM07snLBGAxRaNucIJt3tjBRAI3+AWi+ccmOPfyW+TjQfPk0nIzAjf42vN6nLTowLWYHlX7M6zvlLtcZ6dT+pfmt15tGevA3yvDrdEySJGVSTAgAAhIDWmIxtLW1IpdTFl3GYhI4r0x/9PJCtoda1OqcVzZXPJhWBxiCGX2sr9sI98MRjUaLXFSKqL5QJvqeIyLCgRKthEFWx8xxSFC2mOBgAAa1p5HJyegZ6FeMw+r2E0wIxevEpHNofb0fI7BFDzF1C5E3cBkIAQam62Sc22MQOd3iYgxWf5jFKGzku33Bo5mCKP1e7d6i8bHQxtDKe4UpsqFQR1mS4JAY0/cNBhTDsJORjuLcRTkEUWYnwpAOBgAsOHvRhn2mIahGeSfCkg4gPGkJejqYUAzAgPGtGAbgQgFcVk6aFQVKiTYrCJyYAdj8fq9zzQ2vHTxPqghvqOcdVfkoeypRzTMNpX/wXp3dDOxhjanJZv6XkmyANe/sxutmy1q8YMOYBKCAxXiO8A+0aoGux5m8X+tRz5rdljqmSQjEJIZ4PIZ0Ko1CIYd8QbZcyzmHJBV7P5T05C0ROlE5XlruWoTV8tPHUmhfIqgQGYApEn2fERERGnqfLZQtLpLJFiRb4ti1tw8Frq0EExCMQQKDBAkAhxBC12m89BQtrC+Der3N3lgqvHSEM9wpxLIHpcNEA/a+2OseoZtsq3kng1uMae0aZdFlAbFYDDFJQowpewgLwcnOD/gliDI7EaZ0BCEl+8qcRBQOmh5hSUuY0iEJ7m4AbvakmRtmuWSiTRrVvFNgIC0eqOcfTajnGVX5IiNwZRhemXSMmWZZqHp40y/T4lXYFPLROd9oLWbihNsSitjLtB5tCaXvzZwuI7ydYghOp9OISxIKch4yt357tXhfcZ76+6Yr9gY2vyn6LiKqglYfHmSa3Y9HRESECyE4GBha4nGk00kMDPSjP5MDj0mqtygAMEgMYCg2lrgZc83RS5z0kMgTuHy4iz7pOxyzI8VRrtzuEULbH5hB0wr9GZmtx4t0aWFdEFw0lmDKQoK4FjZarVuU5i/KIWjyuhGWdFA1Ou6r467IE5geYUkHEI60SFosDScDsAalCTQz5slqDpCbI6CabxpW8WhlHvW8owz1vKMqn9vAiAJU88zq0Vra66wR8jgaWRzONRuqZaogwDntwbFlNTiR/ktb2U41egp1Kv0maH9LxWgTUIVCAS0tLSgUCmCMoSXG0N7ahrxcgCwXb8VSaRvmZ6LU/CeH+9KKala7U+5jI6gRtZuliPqWiIiIZmHVRyQIISPOGFrTSRQEx96+ATAIcDUEsRJ+UPEIBopbeHevXlFk+PVamBa1i+742ZqjGs9g5Vzlz/UKFe0VYlozALsZnQEAjEGo0YZicS1cNCAJIyS5UN8fhDoUBBn9Eoa0NNvoGI2unAmKh3YpwvCNAFE6qOFpAAboTrDpIQRB18OAat4BZsMNvYpMOd8oQz3fqMrnZ2DULKjKBZgHZcYxSh2j56CRADRDVgtVrmbL4Y3h5R1rtig6Wjui
# (embedded base64 header image removed)
# ## Data Overview
# The dataset contains the prices and other attributes of almost 54,000 diamonds. You can download the dataset from [here](https://www.kaggle.com/datasets/shivam2503/diamonds/code).
# **Content**
# **price:** The price of the Diamond
# **carat:** The carat value of the Diamond
# **cut:** The cut type of the Diamond; it determines the shine
# **color:** The color value of the Diamond
# **clarity:** The clarity grade of the Diamond
# **depth:** The depth value of the Diamond
# **table:** Flat facet on its surface: the large, flat surface facet that you can see when you look at the diamond from above.
# **x:** Width of the diamond
# **y:** Length of the diamond
# **z:** Height of the diamond
# ![image1](https://assets.diamondnexus.com/q_auto,f_auto/diamond_nexus/blog/2019/may/29e/the-4-cs.jpg)
# **Diamond Clarity**: Refers to the grade we give a diamond based on the blemishes and inclusions found in a diamond.
# **Diamond Color**: Diamond color refers to how clear a diamond is. The diamond color scale ranges from D (completely clear) to Z (a warm yellowish tint).
# **Diamond Cut**: Diamond cut refers to the proportions and technical specs that determine how brilliant a diamond is (Fair, Good, Very Good, Premium, Ideal).
# **Diamond Depth**: Depth % refers to the height of the diamond, from the culet to the top of the table.
# **Diamond Table**: Diamond table % is determined by dividing the width of the table (top surface area) by the width (diameter) of the diamond.
# # Get the Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, Ridge
from xgboost import XGBRegressor
from sklearn.neighbors import KNeighborsRegressor
import lightgbm as lgb
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn import metrics

diamond = pd.read_csv(
    "/content/drive/MyDrive/Colab Notebooks/SHAI_Level_1/Diamond Project/diamond-price-prediction/train.csv"
)
diamond_test = pd.read_csv(
    "/content/drive/MyDrive/Colab Notebooks/SHAI_Level_1/Diamond Project/diamond-price-prediction/test.csv"
)
# # Data Description
diamond.shape
diamond.head()
diamond.info()
# **Basic Data Observations**
# - price: Continuous; this is the Target Variable!
# - carat: Continuous
# - cut: Categorical
# - color: Categorical
# - clarity: Categorical
# - depth: Continuous
# - table: Continuous
# - x: Continuous
# - y: Continuous
# - z: Continuous
diamond.describe()
# **Basic Data Statistical Observations**
# - The minimum values of "x", "y" and "z" are zero. Since a diamond cannot have a dimension equal to zero, this means there are incorrect values in the data.
# - "y" and "z" have some outliers that need to be removed or filled with another value.
# - Since the Id column is just a row identifier and carries no information about the diamond, we will drop it.
# # Data Exploration
diamond["price"].hist()
# The distribution of the target is usable: it is right-skewed, but there are enough rows across the whole price range.
# ## Categorical variables: Bar plot
plt.figure(figsize=(18, 3))
plt.subplot(1, 3, 1)
diamond.groupby("cut").size().plot.barh(legend=None, color="#e8a811")
plt.subplot(1, 3, 2)
diamond.groupby("color").size().plot.barh(legend=None, color="#1192e8")
plt.subplot(1, 3, 3)
diamond.groupby("clarity").size().plot.barh(legend=None, color="#fa4d56")
# A heavily skewed categorical column (one level dominating all the others) carries
# little information about the target and would be dropped. None of these three columns
# is skewed that way, so all three categorical variables will be kept for our model.
# ## Continuous variables: Histogram
diamond.hist(["carat", "depth", "table", "x", "y", "z"], figsize=(18, 10))
# The ideal histogram is a bell curve or a slightly skewed bell curve. If there is too much skewness, then outlier treatment should be done.
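# A numeric companion to the histograms above (a quick sketch): pandas' skew() puts a
# number on "too much skewness"; values near 0 are roughly symmetric, and large
# positive values indicate a long right tail.
diamond[["carat", "depth", "table", "x", "y", "z"]].skew().sort_values(ascending=False)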
# Two options to handle outliers:
# - Delete the outlier records
# - Impute the outlier values with another value
# # Data Cleaning
diamond.drop(["Id"], axis=1, inplace=True)
diamond = diamond.drop(diamond[diamond["x"] == 0].index)
diamond = diamond.drop(diamond[diamond["y"] == 0].index)
diamond = diamond.drop(diamond[diamond["z"] == 0].index)
# ## Replacing outliers
diamond.loc[diamond["x"] > 2, "x"].sort_values(ascending=True)
diamond.loc[diamond["x"] < 3.73, "x"] = 3.73
diamond.loc[diamond["y"] < 20, "y"].sort_values(ascending=False)
diamond.loc[diamond["y"] > 20, "y"] = 10.54
diamond.loc[diamond["z"] < 20, "z"].sort_values(ascending=False)
diamond.loc[diamond["z"] > 8, "z"] = 6.98
diamond.hist(["x", "y", "z"], figsize=(18, 8))
diamond.loc[diamond["z"] > 2, "z"].sort_values(ascending=True)
diamond.loc[diamond["z"] < 2, "z"] = 2.06
diamond.hist(["x", "y", "z"], figsize=(18, 8))
diamond.describe()
# ## Missing values
diamond.isnull().sum()
# There are no missing values in our data
# # Data Exploration | Part 2
# ## Continuous Vs Continuous : Scatter Charts
continuous_cols = list(diamond.select_dtypes(include=["int64", "float64"]).columns)
for predictor in continuous_cols:
    diamond.plot.scatter(
        x=predictor, y="price", figsize=(10, 5), title=predictor + " VS " + "price"
    )
# There could be three scenarios:
# - Increasing Trend
# - Decreasing Trend
# - No Trend
# ## Continuous Vs Continuous : Correlation Value
correlation = diamond[continuous_cols].corr()
correlation
fig = plt.figure(figsize=(10, 5))
sns.heatmap(diamond.corr(), annot=True, cmap="viridis")
correlation["price"][abs(correlation["price"]) > 0.2]
# Observations:
# - carat has a strong positive correlation with price (0.92).
# - depth has a weak negative correlation (-0.01) with price, indicating that the depth of a diamond does not have a significant impact on its price.
# - table has a moderate positive correlation with price (0.13).
# - x, y and z dimensions have a strong positive correlation with price (>0.88).
# ## Categorical Vs Continuous : Box Plots
categorical_cols = list(diamond.select_dtypes(include=["object"]).columns)
fig, ax = plt.subplots(nrows=1, ncols=len(categorical_cols), figsize=(18, 5))
for col, i in zip(categorical_cols, range(len(categorical_cols))):
    diamond.boxplot(column="price", by=col, figsize=(5, 5), vert=True, ax=ax[i])
# In our data, all three categorical variables look correlated with the Target variable (price).
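# Putting numbers behind the box plots (a quick sketch): the median price per category
# level; medians are robust to the heavy price outliers visible above.
for col in ["cut", "color", "clarity"]:
    print(diamond.groupby(col)["price"].median().sort_values())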
# Selected categorical columns for our model: 'cut', 'color', 'clarity'
diamond.head()
# # Feature Selection & Transformation
diamond["volume"] = diamond.x * diamond.y * diamond.z
selected_cols = ["carat", "volume", "cut", "color", "clarity", "price"]
diamond = diamond[selected_cols]
diamond.head()
diamond["cut"] = diamond["cut"].map(
    {"Fair": 0, "Good": 1, "Very Good": 2, "Premium": 3, "Ideal": 4}
)
diamond["color"] = diamond["color"].map(
    {"J": 0, "I": 1, "H": 2, "G": 3, "F": 4, "E": 5, "D": 6}
)
diamond["clarity"] = diamond["clarity"].map(
    {"I1": 0, "SI2": 1, "SI1": 2, "VS2": 3, "VS1": 4, "VVS2": 5, "VVS1": 6, "IF": 7}
)
diamond.head()
target = "price"
features = ["carat", "volume", "cut", "color", "clarity"]
X_train_prepared = diamond[features]
y_train = diamond[target]
# # Select and Train a Model
# ## Train Model
lin_reg_pipe = LinearRegression()
dec_tree_pipe = DecisionTreeRegressor()
ranfor_pipe = RandomForestRegressor()
xgb_pipe = XGBRegressor()
lgbm_pipe = lgb.LGBMRegressor()
pipelines = [lin_reg_pipe, dec_tree_pipe, ranfor_pipe, xgb_pipe, lgbm_pipe]
for pipe in pipelines:
    pipe.fit(X_train_prepared, y_train)
# ## Evaluation Using Cross-Validation
cv_results_rms = []
for model in pipelines:
    cv_score = cross_val_score(
        model, X_train_prepared, y_train, scoring="neg_root_mean_squared_error", cv=10
    )
    cv_results_rms.append(cv_score)
# ## Measure Model’s RMSE
# cross_val_score returns negative RMSE for this scorer, so negate before printing
print("LinearRegression", -cv_results_rms[0].mean())
print("DecisionTree", -cv_results_rms[1].mean())
print("RandomForest", -cv_results_rms[2].mean())
print("XGBRegressor", -cv_results_rms[3].mean())
print("LGBMRegressor", -cv_results_rms[4].mean())
# ## Grid Search
params = {
    "n_estimators": [100, 200],
    "max_depth": [5, 10],
    "learning_rate": [0.01, 0.1],
}
grid_model = GridSearchCV(
    lgb.LGBMRegressor(random_state=101),
    params,
    scoring="neg_root_mean_squared_error",
    cv=5,
)
grid_model.fit(X_train_prepared, y_train)
# ## Analyze the Best Models
cvres = grid_model.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    # mean_test_score is already a negative RMSE, so negating it (not taking a
    # square root) recovers the RMSE for each parameter combination
    print(-mean_score, params)
final_model = grid_model.best_estimator_
# # Evaluate Your System on the Test Set
diamond_test.head()
diamond_test["volume"] = diamond_test.x * diamond_test.y * diamond_test.z
Id = diamond_test["Id"]
diamond_test.drop(["Id", "depth", "table", "x", "y", "z"], axis=1, inplace=True)
diamond_test["cut"] = diamond_test["cut"].map(
    {"Fair": 0, "Good": 1, "Very Good": 2, "Premium": 3, "Ideal": 4}
)
diamond_test["color"] = diamond_test["color"].map(
    {"J": 0, "I": 1, "H": 2, "G": 3, "F": 4, "E": 5, "D": 6}
)
diamond_test["clarity"] = diamond_test["clarity"].map(
    {"I1": 0, "SI2": 1, "SI1": 2, "VS2": 3, "VS1": 4, "VVS2": 5, "VVS1": 6, "IF": 7}
)
diamond_test.head()
pred = final_model.predict(diamond_test)
data = {"Id": Id, "price": pred}
submission = pd.DataFrame(data=data)
submission.head()
submission.describe()
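# A common closing step, not part of the original pipeline (a sketch with a
# hypothetical filename): persist the tuned estimator so predictions can be rerun
# without refitting.
import joblib

joblib.dump(final_model, "lgbm_diamond_price.joblib")
# final_model = joblib.load("lgbm_diamond_price.joblib")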
false
0
229,261
0
229,261
229,261
129801610
# # Bundesliga pretrained YOLO-NAS Ball Detection
# https://www.kaggle.com/code/stpeteishii/bundesliga-pretrained-yolov8-ball-detection
import cv2
import matplotlib.pyplot as plt
import torch
from IPython.display import Video
from torchinfo import summary
from super_gradients.training import models
from super_gradients.training import Trainer
from super_gradients.training import dataloaders
from super_gradients.training.dataloaders.dataloaders import (
    coco_detection_yolo_format_train,
    coco_detection_yolo_format_val,
)

model = models.get("yolo_nas_l", pretrained_weights="coco")
input_video_path = "/kaggle/input/dfl-bundesliga-data-shootout/test/019d5b34_1.mp4"
output_video_path = "./detections.mp4"
Video(input_video_path, width=600, height=400, embed=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device).predict(input_video_path).save(output_video_path)
# Display the annotated video that predict().save() wrote above
Video(output_video_path, width=600, height=400, embed=True)
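# cv2 is imported above but never used; a minimal single-frame sanity check (a sketch,
# assuming super-gradients' predict() accepts a numpy RGB image, as its docs describe):
cap = cv2.VideoCapture(input_video_path)
ok, frame = cap.read()
cap.release()
if ok:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes frames as BGR
    model.predict(rgb).show()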
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/801/129801610.ipynb
null
null
[{"Id": 129801610, "ScriptId": 38592403, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2648923, "CreationDate": "05/16/2023 14:59:30", "VersionNumber": 12.0, "Title": "Bundesliga pretrained YOLO-NAS Ball Detection", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 42.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 41.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
null
null
null
null
false
0
313
5
313
313
129801534
import numpy as np
import pandas as pd
from IPython.display import display

train = pd.read_csv("/kaggle/input/playground-series-s3e6/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e6/test.csv")
sets = {"train": train, "test": test}
# Step 1. Trying to understand the data I'm working with.
# For this exercise I will be using LinearRegression.
# Task: to predict 'price' - pure 'regression'.
# Quality metric: RMSE.
# display(train.head())
display(train.shape)
display(train.columns)
display(train.dtypes)
# # Step 2. Data filter.
#
# Checking for and deleting any duplicates in the train data.
# Checking if there are any missing values.
# Dropping 1 particular column from train data: 'id'.
#
import seaborn as sns


def dropping_duplicates(data: pd.DataFrame, name: str):
    print(
        f"A number of duplicated rows in {name} is {data.duplicated().sum()}, they were dropped."
    )
    data.drop_duplicates(inplace=True)


def miss_values_check(data: pd.DataFrame, name: str):
    print(f"A number of NaN values in {name} is {data.isnull().sum().sum()}")
    if data.isnull().sum().sum() > 0:
        sns.heatmap(data.isnull())


def drop_columns(data: pd.DataFrame, name: str, columns: list):
    print(f"These columns in {name} are not needed: {columns}, they were dropped.")
    data.drop(columns, axis=1, inplace=True)


for n in sets:
    if n == "train":
        dropping_duplicates(sets[n], n)
        drop_columns(sets[n], n, ["id"])
    miss_values_check(sets[n], n)

# Step 3. Searching for outliers (just visual).
from sklearn.utils import shuffle
import matplotlib.pyplot as plt

# https://www.kaggle.com/code/viktortaran/ps-s-3-e-6#%E2%9C%855.5.-GB
#############################################
### These are the most vulnerable features ###
#############################################
target = "price"
X = train.drop([target], axis=1)
y = train[target]
X, y = shuffle(X, y, random_state=42)
X = X.reset_index(drop=True)
y = y.reset_index(drop=True)
num_cols_outl = [
    "squareMeters",
    "floors",
    "cityCode",
    "made",
    "basement",
    "attic",
    "garage",
]
from sklearn.neighbors import LocalOutlierFactor

cont_FEATURES = num_cols_outl


def plot_outliers(data, target, df, feature, threshold=8):
    # Flag points whose z-score exceeds the threshold and plot them against random jitter
    mean, std = np.mean(df), np.std(df)
    z_score = np.abs((df - mean) / std)
    good = z_score < threshold
    print(f"\033[0;33;40m Rejection {(~good).sum()} points \033[0;30;0m ")
    visual_scatter = np.random.normal(size=df.size)
    plt.scatter(df[good], visual_scatter[good], s=2, label="Good", color="#4CAF50")
    plt.scatter(df[~good], visual_scatter[~good], s=8, label="Bad", color="#F44336")
    plt.legend(loc="upper right")
    plt.title(feature)
    plt.show()
    data, target = data[good], target[good]
    return data, target


for feature in cont_FEATURES:
    print(feature)
    X, y = plot_outliers(X, y, X[feature], feature)

train_initial = train.copy()
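# The plot_outliers pass above filters only the shuffled X/y copies, not train itself.
# A sketch of applying the same z-score rule directly to train (same threshold of 8
# assumed), as an alternative to the index-based drop in the next step:
def zscore_inliers(s: pd.Series, threshold: float = 8.0) -> pd.Series:
    z = np.abs((s - s.mean()) / s.std())
    return z < threshold


inlier_mask = np.logical_and.reduce([zscore_inliers(train[c]) for c in num_cols_outl])
print(f"rows kept by the z-score rule: {inlier_mask.sum()} of {len(train)}")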
# Step 4. Handle found outliers (index drop).
import itertools


def oneDArray(x):
    return list(itertools.chain(*x))


# num_cols_outl=['squareMeters', 'floors', 'cityCode', 'made', 'basement', 'attic', 'garage']
outliers = []
outliers.append(train[train.squareMeters >= 6 * 1e6])
outliers.append(train[train.floors >= 5000])
outliers.append(train[train.cityCode >= 4e5])
outliers.append(train[train.made >= 9000])
outliers.append(train[train.basement >= 8e4])
outliers.append(train[train.attic >= 2e4])
outliers.append(train[train.garage >= 8e4])
outlier_ids = []
for outlier_frame in outliers:
    # display(outlier_frame)
    frame_indexes = [int(x) for x in outlier_frame.index]
    outlier_ids.append(frame_indexes)
outlier_ids = sorted(list(set(oneDArray(outlier_ids))))
print(outlier_ids)
train = train.drop(outlier_ids)
train = train.reset_index(drop=True)
print(train.shape, train_initial.shape)

# Step 5. Checking if the outlier drop really helps.
plt.figure(figsize=(16, 16), dpi=90)
sns.heatmap(
    train_initial.corr(),
    xticklabels=train_initial.corr().columns,
    yticklabels=train_initial.corr().columns,
    annot=True,
)
plt.figure(figsize=(16, 16), dpi=90)
sns.heatmap(
    train.corr(),
    xticklabels=train.corr().columns,
    yticklabels=train.corr().columns,
    annot=True,
)
plt.figure(figsize=(10, 10), dpi=90)
plt.scatter(train.price, train.squareMeters)
plt.xlabel("Price")
plt.ylabel("Square meters")

# Step 6. Fit the resulting train data to a linear model; predict and save results (for test data).
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

X_train, X_val, y_train, y_val = train_test_split(
    train.drop(["price"], axis=1),
    train["price"],
    test_size=0.2,
    shuffle=True,
    random_state=42,
)
lr = LinearRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_val)
rmse = np.sqrt(mean_squared_error(y_val, pred))
print(f"RMSE: {rmse}")
price_prediction = lr.predict(test[train.drop(columns=["price"]).columns])
submit_df = pd.DataFrame({"id": test["id"], "price": price_prediction})
submit_df.to_csv("submission.csv", index=False)
submit_df
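# As a robustness check beyond the single hold-out split (a sketch): 5-fold
# cross-validated RMSE on the cleaned training data.
from sklearn.model_selection import cross_val_score

cv_rmse = -cross_val_score(
    LinearRegression(),
    train.drop(["price"], axis=1),
    train["price"],
    scoring="neg_root_mean_squared_error",
    cv=5,
)
print(f"CV RMSE: {cv_rmse.mean():.2f} (std {cv_rmse.std():.2f})")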
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/801/129801534.ipynb
null
null
[{"Id": 129801534, "ScriptId": 38589202, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11736731, "CreationDate": "05/16/2023 14:58:55", "VersionNumber": 2.0, "Title": "sad_03_iakovtsev", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 159.0, "LinesInsertedFromPrevious": 27.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 132.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
false
0
1,719
3
1,719
1,719
129556927
<jupyter_start><jupyter_text>Cancer Data
**570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant**
**Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).**
Kaggle dataset identifier: cancer-data
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import StandardScaler
import warnings

warnings.filterwarnings(action="ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input/cancer-data/Cancer_Data.csv"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/cancer-data/Cancer_Data.csv")
df
# # I. Dataset Analysis (EDA)
# ### 1. Structure analysis:
# - **Target variable**: diagnosis
# - **Number of rows and columns**: (569, 33)
# - **Variable types**: Qualitative: 1, Quantitative: 32
# - **Number of missing values**: 0
# ### 2. Content analysis:
# - **Target visualization**:
#   - B: 357
#   - M: 212
# - The distribution of most variables is skewed... and the rest follow a normal distribution
# - **Target/variable relationship**:
#   - Most variables take a different value for each cancer type... But only the variables related to **compactness**, **perimeter**, **radius**, **texture** and **concavity** seem to be much more strongly tied to the cancer type. A hypothesis to test?
# - **Variable/variable relationship**:
#   - Some variables are highly correlated: +0.9 correlation
# #### Null hypothesis (h0):
# - Claim to test: the variables related to **compactness**, **perimeter**, **radius**, **texture** and **concavity** are significantly different from the others
# - h0 = the mean values of the variables are equal regardless of the cancer type -> **h0 rejected**
df.shape
df.drop(["id", "Unnamed: 32"], axis=1, inplace=True)
df.info()
df.isna().sum()
df.describe()
df["diagnosis"].value_counts()
sns.countplot(data=df, x="diagnosis")
# - Visualizing the distribution of the continuous variables
i = 1
plt.figure(figsize=(20, 60))
for col in df.select_dtypes("float"):
    plt.subplot(10, 3, i)
    sns.distplot(df[col])
    i += 1
benign_df = df[df["diagnosis"] == "B"]
malignant_df = df[df["diagnosis"] == "M"]
numerical_features = df.drop("diagnosis", axis=1)
# - Target/variable relationship
i = 1
plt.figure(figsize=(20, 60))
for col in df.select_dtypes("float"):
    plt.subplot(10, 3, i)
    sns.distplot(benign_df[col], label="Benign")
    sns.distplot(malignant_df[col], label="Malignant")
    plt.legend()
    i += 1
i = 1
plt.figure(figsize=(20, 60))
for col in df.select_dtypes("float"):
    plt.subplot(10, 3, i)
    sns.boxplot(data=df, x=df["diagnosis"], y=df[col])
    i += 1
# - Correlation matrix
plt.figure(figsize=(20, 8))
sns.heatmap(numerical_features.corr(), annot=True, cbar=False)
print(f"{benign_df.shape}\n{malignant_df.shape}")
balanced_benign_df = benign_df.sample(malignant_df.shape[0])
# - Student's t-test to check the hypothesis stated above
def t_test(col):
    alpha = 0.02
    stat, p = ttest_ind(balanced_benign_df[col].dropna(), malignant_df[col].dropna())
    if p < alpha:
        return "h0 rejected"
    else:
        return 0


for col in numerical_features:
    print(f"{col :-<50} {t_test(col)}")
# # II. Pre-processing
diagnosis_cat = {"M": 0, "B": 1}
# Outlier removal: drop rows outside the 1.5*IQR fences computed per diagnosis group
def drop_outliers(col, df):
    q1 = df.groupby("diagnosis")[col].quantile(0.25)
    q3 = df.groupby("diagnosis")[col].quantile(0.75)
    iqr = q3 - q1
    limit1 = q1 - 1.5 * iqr
    limit2 = q3 + 1.5 * iqr
    outliers = df[
        (df[col] < limit1.loc[df["diagnosis"]].values)
        | (df[col] > limit2.loc[df["diagnosis"]].values)
    ]
    df = df.drop(outliers.index)
    return df


for col in numerical_features:
    df = drop_outliers(col, df)
df.shape
df["diagnosis"] = df["diagnosis"].map(diagnosis_cat)
y = df["diagnosis"]
x = df.drop(["diagnosis"], axis=1)
# - Standardizing the data to help our models
X = StandardScaler().fit_transform(x)
# # III. Modeling
# - Train test split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=0
)
print(f"Train set: {X_train.shape}\nTest set: {X_test.shape}")
# ## Validating the results of the chosen estimators
# - KNN
# - Logistic Regression
# - Decision Tree
# - Random Forest
# - SVC
models = {
    "LogisticRegression": LogisticRegression(random_state=0),
    "KNN": KNeighborsClassifier(),
    "RandomForest": RandomForestClassifier(random_state=0),
    "DecisionTree": DecisionTreeClassifier(random_state=0),
    "SVC": SVC(random_state=0),
}


def models_evaluation(models):
    for name, model in models.items():
        model.fit(X_train, y_train)
        print(f"{name}\nScore: {model.score(X_test, y_test)}")
        print(confusion_matrix(y_test, model.predict(X_test)))
        print(classification_report(y_test, model.predict(X_test)))
        N, train_score, val_score = learning_curve(
            model,
            X_train,
            y_train,
            train_sizes=np.linspace(0.1, 1.0, 30),
            cv=5,
            scoring="f1",
        )
        plt.plot(N, train_score.mean(axis=1), label="train")
        plt.plot(N, val_score.mean(axis=1), label="validation")
        plt.xlabel("Train_sizes")
        plt.legend()
        plt.show()


models_evaluation(models)


def final_model_evaluation(model):
    print(confusion_matrix(y_test, model.predict(X_test)))
    print(classification_report(y_test, model.predict(X_test)))
    N, train_score, val_score = learning_curve(
        model,
        X_train,
        y_train,
        train_sizes=np.linspace(0.1, 1.0, 30),
        cv=5,
        scoring="f1",
    )
    plt.plot(N, train_score.mean(axis=1), label="train")
    plt.plot(N, val_score.mean(axis=1), label="validation")
    plt.xlabel("Train_sizes")
    plt.legend()
    plt.show()


# ## Optimizing the LR estimator
# - **Score**: 98%
# - **Accuracy**: 98%
# - **Precision**:
#   - *Malignant Cancer (M)*: 100%
#   - *Benign Cancer (B)*: 97%
# - **Recall**:
#   - *Malignant Cancer (M)*: 96%
#   - *Benign Cancer (B)*: 100%
# - **f1-score**:
#   - *Malignant Cancer (M)*: 98%
#   - *Benign Cancer (B)*: 99%
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
# note: with the default 'lbfgs' solver, the 'l1' and 'elasticnet' penalties are not
# supported (and 'elasticnet' also requires an l1_ratio), so those grid candidates
# fail silently and only 'l2' is effectively searched; use solver='saga' to search all three
lr_params = {
    "penalty": ["l1", "l2", "elasticnet"],
    "C": np.arange(1, 11),
    "tol": [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6],
}
lr_grid = GridSearchCV(lr_model, param_grid=lr_params, scoring="f1", cv=5)
lr_grid.fit(X_train, y_train)
print(lr_grid.best_score_)
print(lr_grid.best_params_)
lr_model = lr_grid.best_estimator_
lr_model
lr_model.score(X_test, y_test)
final_model_evaluation(lr_model)
# ## Optimizing the KNN estimator
# - **Score**: 93%
# - **Accuracy**: 94%
# - **Precision**:
#   - *Malignant Cancer (M)*: 96%
#   - *Benign Cancer (B)*: 93%
# - **Recall**:
#   - *Malignant Cancer (M)*: 88%
#   - *Benign Cancer (B)*: 97%
# - **f1-score**:
#   - *Malignant Cancer (M)*: 92%
#   - *Benign Cancer (B)*: 95%
knn_model = KNeighborsClassifier()
knn_model.fit(X_train, y_train)
params_grid = {
    "n_neighbors": np.arange(1, 11),
    "metric": ["euclidean", "manhattan", "minkowski"],
}
knn_grid = GridSearchCV(knn_model, param_grid=params_grid, scoring="f1", cv=5)
knn_grid.fit(X_train, y_train)
print(knn_grid.best_params_)
print(knn_grid.best_score_)
knn_model = knn_grid.best_estimator_
knn_model
knn_model.score(X_test, y_test)
final_model_evaluation(knn_model)
# ## Optimizing the SVC estimator
# - **Score**: 98%
# - **Accuracy**: 98%
# - **Precision**:
#   - *Malignant Cancer (M)*: 100%
#   - *Benign Cancer (B)*: 97%
# - **Recall**:
#   - *Malignant Cancer (M)*: 96%
#   - *Benign Cancer (B)*: 100%
# - **f1-score**:
#   - *Malignant Cancer (M)*: 98%
#   - *Benign Cancer (B)*: 99%
svc_params = {
    "C": np.arange(1, 10),
    "degree": [1, 2, 3, 4, 5],
    "gamma": ["scale", "auto"],
}
svc_model = SVC()
svc_model.fit(X_train, y_train)
svc_grid = GridSearchCV(svc_model, param_grid=svc_params, scoring="f1", cv=5)
svc_grid.fit(X_train, y_train)
print(svc_grid.best_score_)
print(svc_grid.best_params_)
svc_model = svc_grid.best_estimator_
svc_model
print(svc_model.score(X_test, y_test))
final_model_evaluation(svc_model)
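# Only LR, KNN and SVC get a tuning pass above, although RandomForest and DecisionTree
# were also evaluated. Below is a minimal sketch following the same GridSearchCV pattern
# for the RandomForest estimator; this parameter grid is an assumption, not tuned values.
rf_params = {
    "n_estimators": [100, 200, 400],
    "max_depth": [None, 5, 10],
    "min_samples_leaf": [1, 2, 4],
}
rf_grid = GridSearchCV(
    RandomForestClassifier(random_state=0), param_grid=rf_params, scoring="f1", cv=5
)
rf_grid.fit(X_train, y_train)
print(rf_grid.best_score_)
print(rf_grid.best_params_)
final_model_evaluation(rf_grid.best_estimator_)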
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/556/129556927.ipynb
cancer-data
erdemtaha
[{"Id": 129556927, "ScriptId": 38522379, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14427913, "CreationDate": "05/14/2023 20:05:12", "VersionNumber": 3.0, "Title": "Pr\u00e9diction du type de cancer", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 322.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 307.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185737697, "KernelVersionId": 129556927, "SourceDatasetVersionId": 5212576}]
[{"Id": 5212576, "DatasetId": 3032092, "DatasourceVersionId": 5284991, "CreatorUserId": 2498226, "LicenseName": "Other (specified in description)", "CreationDate": "03/22/2023 07:57:00", "VersionNumber": 1.0, "Title": "Cancer Data", "Slug": "cancer-data", "Subtitle": "Benign and malignant cancer data", "Description": "**570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant**\n\n**Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3032092, "CreatorUserId": 2498226, "OwnerUserId": 2498226.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5212576.0, "CurrentDatasourceVersionId": 5284991.0, "ForumId": 3071494, "Type": 2, "CreationDate": "03/22/2023 07:57:00", "LastActivityDate": "03/22/2023", "TotalViews": 66608, "TotalDownloads": 11493, "TotalVotes": 209, "TotalKernels": 70}]
[{"Id": 2498226, "UserName": "erdemtaha", "DisplayName": "Erdem Taha", "RegisterDate": "11/15/2018", "PerformanceTier": 1}]
false
1
3,223
1
3,314
3,223
129556495
<jupyter_start><jupyter_text>Flappy Bird - Gymnasium
Dataset taken from playing [Flappy Bird - Gymnasium](https://github.com/markub3327/flappy-bird-gymnasium).
I played 135 times (135 games), where each row is a frame of the game grouped by the 'game' number.
It's important to notice that the pipes' positions depend on the game's screen size; according to [this piece of code](https://github.com/markub3327/flappy-bird-gymnasium/blob/main/flappy_bird_gymnasium/envs/game_logic.py#L122), I got the data by rendering a 310x568 game screen.
Got it with the following script:
```
import time
import keyboard
import pyautogui
import flappy_bird_gymnasium
import gymnasium

render = True
human_playing = True
fps = 30
frame_time = 1.0 / fps
paused = False
env = gymnasium.make("FlappyBird-v0", audio_on=False)

def get_user_action():
    while True:
        if is_game_window_active() and keyboard.is_pressed(' '):
            return 1
        else:
            return 0

def is_game_window_active():
    window = pyautogui.getActiveWindow()
    if window is None:
        return False
    else:
        return window.title == 'pygame window'

def choose_action(env, human_playing):
    if human_playing:
        return get_user_action()
    return env.action_space.sample()

game_number = 0
while True:
    env.reset()
    done = False
    game_number += 1
    while not done:
        # Pause game by pressing 'p'
        if is_game_window_active() and keyboard.is_pressed('p'):
            paused = not paused
            time.sleep(0.1)
        if paused:
            continue
        # Maintain a constant FPS
        start_time = time.time()
        action = choose_action(env, human_playing)
        state, reward, done, _, info = env.step(action)
        row = str(game_number) + ','
        for i in range(len(state)):
            row += str(state[i]) + ','
        row += str(action) + ',' + str(reward) + ',' + str(info['score']) + '\n'
        with open('flappy_bird.csv', 'a') as f:
            f.write(row)
        if render:
            env.render()
        # Maintain a constant FPS
        elapsed_time = time.time() - start_time
        if human_playing and elapsed_time < frame_time:
            time.sleep(frame_time - elapsed_time)
        if done:
            break
```
Kaggle dataset identifier: flappy-bird-gymnasium
<jupyter_script>import numpy as np
import pandas as pd
from keras import Sequential
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Dense, Dropout, LSTM
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler


def pca(df):
    X = df.drop(["game", "action", "score", "reward"], axis=1)
    scaler = StandardScaler()
    X_std = scaler.fit_transform(X)
    pca = PCA()
    pca.fit_transform(X_std)
    exp_var_ratio = pca.explained_variance_ratio_
    print(exp_var_ratio)
    plt.plot(np.cumsum(exp_var_ratio))
    plt.xlabel("Number of Principal Components")
    plt.ylabel("Cumulative Explained Variance Ratio")
    plt.show()


# [0.44881726 0.17563808 0.15772861 0.09005831 0.05178211 0.02017475 0.01790775 0.01511343 0.01061269 0.00680455 0.00398384 0.00137861]
df = pd.read_csv("/kaggle/input/flappy-bird-gymnasium/flappy_bird.csv")
# print PCA
# pca(df);
# remove all frames that happened right before a negative reward
for index, row in df.iterrows():
    if row["reward"] < 0:
        df.drop(
            df[
                (df["score"] == row["score"])
                & (df["game"] == row["game"])
                & (df.index <= index)
            ].index,
            inplace=True,
        )
# remove the third pair of pipes to see if it trains better (spoiler: it didn't)
# also remove score and reward because we don't need them to train
df.drop(
    columns=[
        "next_next_pipe_horizontal_position",
        "next_next_top pipe_vertical_position",
        "next_next_bottom pipe_vertical_position",
        "score",
        "reward",
    ],
    inplace=True,
)
# get groups of 20 frames grouped by game. It gets 0:19, 1:20, 2:21 and so on
data, y_data = [], []
for _, game in df.groupby("game"):
    for i in range(20, len(game)):
        data.append(game[i - 20 : i].drop(columns=["game", "action"]).values)
        y_data.append(game[i - 20 : i]["action"].values[-1])
data, y_data = np.array(data), np.array(y_data)
# split training and testing data
X_train, X_test, y_train, y_test = train_test_split(data, y_data, test_size=0.2)
model = Sequential()
model.add(
    LSTM(32, input_shape=X_train.shape[1:], activation="relu", return_sequences=True)
)
model.add(Dropout(0.3))
model.add(LSTM(units=16, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=8, return_sequences=False))
model.add(Dense(1, activation="sigmoid"))
model.compile(
    loss="mean_squared_error",
    optimizer=Adam(learning_rate=0.0001),
    metrics=["accuracy"],
)
model.fit(
    X_train,
    y_train,
    epochs=300,
    batch_size=32,
    verbose=1,
    validation_data=(X_test, y_test),
    callbacks=[
        EarlyStopping(monitor="loss", min_delta=1e-10, patience=20, verbose=1),
        ReduceLROnPlateau(monitor="loss", factor=0.2, patience=15, verbose=1),
        ModelCheckpoint(
            filepath="weights.h5", monitor="loss", save_best_only=True, verbose=1
        ),
    ],
)
model.save("flappy_bird_model.h5")
evaluation = model.evaluate(X_test, y_test, verbose=1)
print(evaluation)
prediction = model.predict(X_test, verbose=1)
prediction = [1 if x > 0.5 else 0 for x in prediction]
prediction = np.array(prediction)
print(prediction)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/556/129556495.ipynb
flappy-bird-gymnasium
mateusscheper
[{"Id": 129556495, "ScriptId": 38521079, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14907143, "CreationDate": "05/14/2023 19:58:59", "VersionNumber": 1.0, "Title": "[WIP] LSTM - Predicting the action to be taken", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 86.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185737081, "KernelVersionId": 129556495, "SourceDatasetVersionId": 5678814}]
[{"Id": 5678814, "DatasetId": 3264662, "DatasourceVersionId": 5754367, "CreatorUserId": 14907143, "LicenseName": "CC0: Public Domain", "CreationDate": "05/13/2023 21:24:59", "VersionNumber": 1.0, "Title": "Flappy Bird - Gymnasium", "Slug": "flappy-bird-gymnasium", "Subtitle": "Game data from playing Flappy Bird - Gymnasium.", "Description": "Dataset took from playing [Flappy Bird - Gymnasium](https://github.com/markub3327/flappy-bird-gymnasium). \nI played 135 times (135 games), where each row is a frame of the game grouped by the 'game' number.\n\nIt's important to notice that the pipes' positions depend on the game's screen, according to [this piece of code](https://github.com/markub3327/flappy-bird-gymnasium/blob/main/flappy_bird_gymnasium/envs/game_logic.py#L122) I got the data rendering a 310x568 game screen.\n\nGot it with the following script: \n```\nimport time\nimport keyboard\nimport pyautogui\nimport flappy_bird_gymnasium\nimport gymnasium\n\nrender = True\nhuman_playing = True\nfps = 30\nframe_time = 1.0 / fps\npaused = False\nenv = gymnasium.make(\"FlappyBird-v0\", audio_on=False)\n\n\ndef get_user_action():\n while True:\n if is_game_window_active() and keyboard.is_pressed(' '):\n return 1\n else:\n return 0\n\n\ndef is_game_window_active():\n window = pyautogui.getActiveWindow()\n if window is None:\n return False\n else:\n return window.title == 'pygame window'\n\n\ndef choose_action(env, human_playing):\n if human_playing:\n return get_user_action()\n return env.action_space.sample()\n\n\ngame_number = 0\nwhile True:\n env.reset()\n done = False\n game_number += 1\n\n while not done:\n # Pause game by pressing 'p'\n if is_game_window_active() and keyboard.is_pressed('p'):\n paused = not paused\n time.sleep(0.1)\n if paused:\n continue\n\n # Maintain a constant FPS\n start_time = time.time()\n\n action = choose_action(env, human_playing)\n state, reward, done, _, info = env.step(action)\n\n row = str(game_number) + ','\n for i in range(len(state)):\n row += str(state[i]) + ','\n row += str(action) + ',' + str(reward) + ',' + str(info['score']) + '\\n'\n with open('flappy_bird.csv', 'a') as f:\n f.write(row)\n\n if render:\n env.render()\n\n # Maintain a constant FPS\n elapsed_time = time.time() - start_time\n if human_playing and elapsed_time &lt; frame_time:\n time.sleep(frame_time - elapsed_time)\n\n if done:\n break\n```", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3264662, "CreatorUserId": 14907143, "OwnerUserId": 14907143.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5718874.0, "CurrentDatasourceVersionId": 5795026.0, "ForumId": 3330267, "Type": 2, "CreationDate": "05/13/2023 21:24:59", "LastActivityDate": "05/13/2023", "TotalViews": 284, "TotalDownloads": 6, "TotalVotes": 1, "TotalKernels": 0}]
[{"Id": 14907143, "UserName": "mateusscheper", "DisplayName": "Mateus Scheper", "RegisterDate": "05/02/2023", "PerformanceTier": 1}]
false
1
1,152
0
1,841
1,152
129556072
# # Introduction
# This notebook contains the code for the feature engineering, modeling and predictions.
# A big part of the code is based on [this notebook by Chris Deotte](https://www.kaggle.com/code/cdeotte/xgboost-baseline-0-680),
# since I initially did not think of loading the dataset in chunks to work around the
# memory issues caused by the size of the main dataset.
# For the model, we use the [XGBoost](https://xgboost.readthedocs.io/en/latest/) library.
# # Setup
# # Import libraries
import gc
import jo_wilder
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GroupKFold
from xgboost import XGBClassifier, plot_importance
from sklearn.metrics import f1_score

# Setup matplotlib
# # Data loading
# # Path to files
test_csv_path = "/kaggle/input/student-performance-and-game-play/test.csv"
train_csv_path = "/kaggle/input/student-performance-and-game-play/train.csv"
target_labels_csv = "/kaggle/input/student-performance-and-game-play/train_labels.csv"
# Load only session_id column
tmp = pd.read_csv(train_csv_path, usecols=[0])
tmp = tmp.groupby("session_id")["session_id"].agg("count")
# Calculate chunks and skips
pieces = 25
chunks = int(np.ceil(len(tmp) / pieces))
reads = []
skips = [0]
for k in range(pieces):
    a = k * chunks
    b = (k + 1) * chunks
    if b > len(tmp):
        b = len(tmp)
    r = tmp.iloc[a:b].sum()
    reads.append(r)
    skips.append(skips[-1] + r)
print(f"pieces: {pieces} of sizes: {reads}")
train_df = pd.read_csv(train_csv_path, nrows=reads[0])
train_df.head()
target_df = pd.read_csv(target_labels_csv)
target_df["session"] = target_df.session_id.apply(lambda x: int(x.split("_")[0]))
target_df["q"] = target_df.session_id.apply(lambda x: int(x.split("_")[-1][1:]))
target_df["correct"] = target_df["correct"].astype("int8")
target_df["q"] = target_df["q"].astype("int8")
target_df.head()
# # Feature engineering
categorical_cols = [
    "event_name",
    "fqid",
    "room_fqid",
    "text",
    "text_fqid",
]
numerical_cols = [
    "elapsed_time",
    "level",
    "page",
    "room_coor_x",
    "room_coor_y",
    "screen_coor_x",
    "screen_coor_y",
    "hover_duration",
]
event_list = train_df["event_name"].unique().tolist()
event_list, len(event_list)
name_list = train_df["text"].unique().tolist()
name_list, len(name_list)
fqid_list = train_df["fqid"].unique().tolist()
fqid_list, len(fqid_list)
room_list = train_df["room_fqid"].unique().tolist()
room_list, len(room_list)
groupby_cols = ["session_id", "level_group"]


def feature_engineer(train_df):
    dfs = []
    agg_functions = {c: ["mean", "std", "sum", "max", "min"] for c in numerical_cols}
    for c, funcs in agg_functions.items():
        tmp = train_df.groupby(groupby_cols)[c].agg(funcs)
        tmp.columns = [f"{c}_{agg_name}" for agg_name in funcs]
        dfs.append(tmp)
    for c in categorical_cols:
        tmp = train_df.groupby(groupby_cols)[c].agg("nunique")
        tmp.name = f"{tmp.name}_nunique"
        dfs.append(tmp)
    for c in event_list:
        train_df[c] = (train_df["event_name"] == c).astype(np.int8)
    for c in event_list:
        tmp = train_df.groupby(groupby_cols).agg({c: "sum", "elapsed_time": "sum"})
        tmp.rename(
            columns={c: f"{c}_sum", "elapsed_time": f"{c}_elapsed_time_sum"},
            inplace=True,
        )
        dfs.append(tmp)
    for c in room_list:
        train_df[c] = (train_df["room_fqid"] == c).astype(np.int8)
    for c in room_list:
        tmp = train_df.groupby(groupby_cols)[c].agg("sum")
        tmp.name = f"{tmp.name}_sum"
        dfs.append(tmp)
    # Frequency encoding of fqid
    fqid_counts = train_df["fqid"].value_counts()
    train_df["fqid_freq_encoded"] = train_df["fqid"].map(fqid_counts)
    tmp = train_df.groupby(groupby_cols)["fqid_freq_encoded"].agg(
        ["mean", "sum", "max", "min"]
    )
    tmp.columns = [f"fqid_freq_encoded_{agg_name}" for agg_name in tmp.columns]
    dfs.append(tmp)
    train_df.drop(columns=["fqid", "fqid_freq_encoded"], inplace=True)
    # Frequency encoding of text
    text_counts = train_df["text"].value_counts()
    train_df["text_freq_encoded"] = train_df["text"].map(text_counts)
    tmp = train_df.groupby(groupby_cols)["text_freq_encoded"].agg(
        ["mean", "sum", "max", "min"]
    )
    tmp.columns = [f"text_freq_encoded_{agg_name}" for agg_name in tmp.columns]
    dfs.append(tmp)
    train_df.drop(columns=["text", "text_freq_encoded"], inplace=True)
    df = pd.concat(dfs, axis=1).fillna(-1)
    df = df.reset_index().set_index("session_id")
    _ = gc.collect()
    return df


# Process train_df in chunks
all_chunks = []
for k in range(pieces):
    rows = 0
    if k > 0:
        rows = range(1, skips[k] + 1)
    train_df = pd.read_csv(train_csv_path, skiprows=rows, nrows=reads[k])
    df = feature_engineer(train_df)
    all_chunks.append(df)
# Clean memory
del train_df
_ = gc.collect()
# Concatenate all chunks
df = pd.concat(all_chunks, axis=0)
df.shape
df.head()
df.columns
# # Train model
features = [c for c in df.columns if c != "level_group"]
users = df.index.unique()
gkf = GroupKFold(n_splits=7)
oof = pd.DataFrame(
    data=np.zeros((len(users), 18)),
    index=users,
)
models = {}
for i, (train_index, test_index) in enumerate(gkf.split(X=df, groups=df.index)):
    print(f"Fold {i + 1} => ", end="")
    xgb_params = {
        "objective": "binary:logistic",
        "eval_metric": "logloss",
        "learning_rate": 0.05,
        "max_depth": 4,
        "n_estimators": 1000,
        "early_stopping_rounds": 50,
        "tree_method": "hist",
        "subsample": 0.8,
        "colsample_bytree": 0.4,
        "use_label_encoder": False,
    }
    for t in range(1, 19):
        if t <= 3:
            grp = "0-4"
        elif t <= 13:
            grp = "5-12"
        elif t <= 22:
            grp = "13-22"
        # Train data
        train_x = df.iloc[train_index]
        train_x = train_x.loc[train_x.level_group == grp]
        train_users = train_x.index.values
        train_y = target_df.loc[target_df.q == t].set_index("session").loc[train_users]
        # Valid data
        valid_x = df.iloc[test_index]
        valid_x = valid_x.loc[valid_x.level_group == grp]
        valid_users = valid_x.index.values
        valid_y = target_df.loc[target_df.q == t].set_index("session").loc[valid_users]
        # Train model
        clf = XGBClassifier(**xgb_params)
        clf.fit(
            train_x[features].astype("float32"),
            train_y["correct"],
            eval_set=[(valid_x[features].astype("float32"), valid_y["correct"])],
            verbose=0,
        )
        print(f"{t}({clf.best_ntree_limit}), ", end="")
        # Save model and predict valid oof
        models[f"{grp}_{t}"] = clf
        oof.loc[valid_users, t - 1] = clf.predict_proba(
            valid_x[features].astype("float32")
        )[:, 1]
    print()
# # CV score
true = oof.copy()
for k in range(18):
    # Get labels for each question
    tmp = target_df.loc[target_df.q == k + 1].set_index("session").loc[users]
    true[k] = tmp.correct.values
scores = []
thresholds = []
best_score = 0
best_threshold = 0
for threshold in np.arange(0.4, 0.81, 0.01):
    print(f"{threshold:.02f}, ", end="")
    preds = (oof.values.reshape((-1)) > threshold).astype("int")
    m = f1_score(true.values.reshape((-1)), preds, average="macro")
    scores.append(m)
    thresholds.append(threshold)
    if m > best_score:
        best_score = m
        best_threshold = threshold
plt.figure(figsize=(20, 5))
plt.plot(thresholds, scores, "-o", color="blue")
plt.scatter([best_threshold], [best_score], color="blue", s=300, alpha=1)
plt.xlabel("Threshold", size=14)
plt.ylabel("Validation F1 Score", size=14)
plt.title(
    f"Threshold vs. F1_Score with Best F1_Score = {best_score:.3f} at Best Threshold = {best_threshold:.3}",
    size=18,
)
plt.show()
print("When using optimal threshold...")
for k in range(18):
    # Compute f1 score for each question
    m = f1_score(
        true[k].values, (oof[k].values > best_threshold).astype("int"), average="macro"
    )
    print(f"Q{k}: F1 =", m)
# Compute overall F1 score
m = f1_score(
    true.values.reshape((-1)),
    (oof.values.reshape((-1)) > best_threshold).astype("int"),
    average="macro",
)
print("==> Overall F1 =", m)
# # Infer test data
# Create environment
env = jo_wilder.make_env()
iter_test = env.iter_test()
# Clear memory
del target_df, df, oof, true
gc.collect()
limits = {"0-4": (1, 4), "5-12": (4, 14), "13-22": (14, 19)}
for test, sample_submission in iter_test:
    # FEATURE ENGINEER TEST DATA
    df = feature_engineer(test)
    # INFER TEST DATA
    grp = test.level_group.values[0]
    a, b = limits[grp]
    for t in range(a, b):
        clf = models[f"{grp}_{t}"]
        p = clf.predict_proba(df[features].astype("float32"))[0, 1]
        mask = sample_submission.session_id.str.contains(f"q{t}")
        sample_submission.loc[mask, "correct"] = int(p > best_threshold)
    env.predict(sample_submission)
# # Submission
df = pd.read_csv("submission.csv")
df.head()
df.correct.mean()
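# `plot_importance` is imported from xgboost at the top of the notebook but never used.
# A minimal sketch of inspecting one of the trained per-question models with it; the
# choice of the question-1 model from the "0-4" group is arbitrary, for illustration.
fig, ax = plt.subplots(figsize=(10, 12))
plot_importance(models["0-4_1"], ax=ax, max_num_features=30)
plt.show()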
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/556/129556072.ipynb
null
null
[{"Id": 129556072, "ScriptId": 38248179, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13861709, "CreationDate": "05/14/2023 19:52:50", "VersionNumber": 4.0, "Title": "XGBoost", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 345.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 274.0, "LinesInsertedFromFork": 340.0, "LinesDeletedFromFork": 23.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 5.0, "TotalVotes": 0}]
null
null
null
null
false
0
3,108
0
3,108
3,108
129556786
# # Introduction
# The RSNA-MICCAI Brain Tumor Radiogenomic Classification competition is a binary
# classification problem: predict the status of the MGMT promoter methylation biomarker
# (MGMT_value of 0 or 1) for a patient from their multi-parametric MRI (mpMRI) exam.
# The dataset consists of brain MRI scans provided through the competition in DICOM
# format, accompanied by a train_labels.csv file containing the target MGMT_value for
# each case in the training set.
# Here's the competition [RSNA-MICCAI Brain Tumor Radiogenomic Classification](https://www.kaggle.com/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/data?select=train_labels.csv)
# # Dataset
# The exact mpMRI scans included are:
# - Fluid Attenuated Inversion Recovery (FLAIR)
#   * What it is: images that detect brain abnormalities, such as edema and inflammatory lesions; they are sensitive to anomalies related to inflammatory and infectious diseases of the central nervous system.
#   * What it highlights: abnormalities in the brain that might not be visible in other MRI sequences.
# - T1-weighted pre-contrast (T1w)
#   * What it is: images that highlight soft tissues, such as muscles and nerves, and are useful for visualizing normal brain structures.
#   * What it highlights: normal brain structures, while also helping in the detection of tumors and lesions.
# - T1-weighted post-contrast (T1Gd)
#   * What it is: images that use a contrast agent to reveal vascular anomalies, such as tumors and lesions, which are more visible after the contrast agent is administered.
#   * What it highlights: vascular anomalies, such as tumors and lesions, making them easier to detect.
# - T2-weighted (T2)
#   * What it is: images that detect abnormalities related to demyelination, such as multiple sclerosis, as well as brain tumors and lesions.
#   * What it highlights: anomalies related to cerebrospinal fluid, such as cysts and brain tumors.
# # Necessary imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# image dicom reader
import pydicom
from pydicom import dcmread
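# The notebook stops after the imports, so here is a minimal, hedged sketch of reading
# and displaying one DICOM slice with pydicom. The directory layout
# (train/<case_id>/<scan_type>/<file>.dcm) follows the competition's data description,
# but the specific case id and file name below are assumptions for illustration.
sample_path = (
    "/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification/"
    "train/00000/FLAIR/Image-1.dcm"  # assumed example path
)
ds = dcmread(sample_path)
print(ds.SOPInstanceUID)  # standard DICOM metadata attribute
plt.imshow(ds.pixel_array, cmap="gray")
plt.title("Sample FLAIR slice")
plt.axis("off")
plt.show()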
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/556/129556786.ipynb
null
null
[{"Id": 129556786, "ScriptId": 38521430, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3864685, "CreationDate": "05/14/2023 20:03:04", "VersionNumber": 4.0, "Title": "RSNA-MICCAI Brain Tumor Classification", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 48.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 48.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
782
0
782
782
129556647
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import beta
from sklearn.preprocessing import OrdinalEncoder
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, GridSearchCV, TimeSeriesSplit
from sklearn.feature_selection import mutual_info_classif, SelectKBest
from sklearn.feature_selection import RFECV
from sklearn.metrics import log_loss, accuracy_score, brier_score_loss
from sklearn.inspection import permutation_importance
from xgboost import XGBClassifier
import shap


def update_beta(swing_col):
    """
    Update the alpha and beta parameters of a Beta(5, 5) prior after every pitch,
    returning a running Bayesian estimate of a player's swing percentage.
    """
    a, b = 5, 5
    expected_swing_pct_list = []
    for swing in swing_col:
        expected_swing_pct_list.append(beta(a, b).mean())
        if swing == 1:
            a += 1
        else:
            b += 1
    return pd.Series(expected_swing_pct_list, index=swing_col.index)


# excluding columns with data that can only be known after a swing
# has or hasn't occurred
post_swing_cols = [
    "hc_x",
    "hc_y",
    "hit_location",
    "launch_speed",
    "launch_angle",
    "babip_value",
    "iso_value",
    "woba_value",
    "woba_denom",
    "launch_speed_angle",
    "post_away_score",
    "post_home_score",
    "post_fld_score",
    "post_bat_score",
    "delta_run_exp",
    "estimated_ba_using_speedangle",
    "estimated_woba_using_speedangle",
    "events",
    "type",
    "delta_home_win_exp",
    "bb_type",
    "hit_distance_sc",
    "description",
    "des",
]
# columns with id or redundant info. names are excluded rather than id numbers
# because the names contain duplicates. umpire, spin_dir are null
useless_or_null_cols = [
    "pitch_uid",
    "game_pk",
    "sv_id",
    "pitcher_name",
    "batter_name",
    "umpire",
    "spin_dir",
    "pitcher.1",
    "fielder_2.1",
    "fielder_2",
    "fielder_3",
    "fielder_4",
    "fielder_5",
    "fielder_6",
    "fielder_7",
    "fielder_8",
    "fielder_9",
]
# loading df w/excluded cols to save mem
df = pd.read_csv(
    "train.csv",
    usecols=lambda x: x not in post_swing_cols and x not in useless_or_null_cols,
)
df = df.sort_values(by="game_date").reset_index(drop=True)
df.head()
# engineering a few features
# maybe players swing conditional on a lead/deficit etc
df["score_differential"] = df["bat_score"].sub(df["fld_score"])
# maybe total movement matters more than movement in either dimension
df["total_movement"] = np.sqrt(df["pfx_x"] ** 2 + df["pfx_z"] ** 2)
# a larger strike zone seems like it would force more swinging
df["total_strike_zone"] = df["sz_top"].sub(df["sz_bot"])
# month? some proxy for temperature. who knows?
df["game_date"] = pd.to_datetime(df["game_date"]) df["month"] = df["game_date"].dt.month df["year"] = df["game_date"].dt.year df["first_day_season"] = df.groupby("year")["game_date"].transform(lambda x: x.min()) # changing the first day of 2020 to March 15th (made up, but in between the others) df.loc[ df["first_day_season"] == pd.to_datetime("2020-07-23"), "first_day_season" ] = pd.to_datetime("2020-03-15") # days since beginning of season df["days_since_firstday"] = (df["game_date"] - df["first_day_season"]).dt.days # converting strikes and balls to a 'count' category, might help df["count"] = df["strikes"].astype(str) + "_" + df["balls"].astype(str) df["count"] = df["count"].astype("category") # simple binaries for above/below strike zone df["above_sztop"] = (df["sz_top"] <= df["plate_z"]).astype(int) df["below_szbot"] = (df["sz_bot"] >= df["plate_z"]).astype(int) # simple binary for 'over plate' when it crosses plate df["over_plate"] = abs(df["plate_x"]) <= 0.83 # unique pitches dict -> how many pitches does each pitcher have? unique_pitches_dict = df.groupby("pitcher")["pitch_type"].nunique().to_dict() df["pitcher_unique_pitches"] = df["pitcher"].map(unique_pitches_dict) # pitch type pitch percentage by pitcher pitch_type_df = ( df.groupby("pitcher")["pitch_type"] .value_counts(normalize=True) .reset_index(name="percentage") ) pitch_type_dict = pitch_type_df.set_index(["pitcher", "pitch_type"])[ "percentage" ].to_dict() df["pitch_type_perc"] = [ pitch_type_dict[(x, y)] for x, y in zip(df["pitcher"], df["pitch_type"]) ] # determining pitcher handedness using the release x coordinate df["pitcher_release_median"] = df.groupby("pitcher")["release_pos_x"].transform( lambda x: x.median() ) df["pitcher_distance_from_median"] = df["release_pos_x"].sub( df["pitcher_release_median"] ) df["pitcher_handedness"] = np.where(df["pitcher_release_median"] <= 0, 1, 0) # calculating z score of pitch velocity in the y dimension for # each pitcher and pitch-type df["pitch_speed_z_score"] = df.groupby(["pitcher", "pitch_type"])["vy0"].transform( lambda x: (x - x.mean()) / x.std() ) # calc'ing z score of acceleration in y dimension for # each pitcher and pitch type df["pitch_yacceleration_z_score"] = df.groupby(["pitcher", "pitch_type"])[ "ay" ].transform(lambda x: (x - x.mean()) / x.std()) # same calc for z dimension df["pitch_zacceleration_z_score"] = df.groupby(["pitcher", "pitch_type"])[ "az" ].transform(lambda x: (x - x.mean()) / x.std()) # plotting release x, y joint plot, just inspecting data etc sns.jointplot( x="release_pos_x", y="release_pos_z", data=df[df["pitcher"] == 453286], kind="hex" ) # since the dataset game_dates do not overlap, i'll create some expanding statistics # for each player. i will fillna as if i know nothing and use only the expanding # swing percentage to date in the dataset(s) # i've decided this sucks and if i use it i'll use a beta-binomial. 
# note to self: see if it improves model on a .sample() for the data
# also see if it improves model if it is applied to batter-zone groups
df["batter_swing_perc"] = df.groupby("batter")["swing"].transform(
    lambda x: x.shift().expanding(min_periods=30).mean()
)
df["pitcher_swing_perc"] = df.groupby("pitcher")["swing"].transform(
    lambda x: x.shift().expanding(min_periods=30).mean()
)

# pfx_x, pfx_z = horizontal_movement_catcher, vertical_movement_catcher
# plate_x, plate_z = horizontal_position_plate, vertical_position_plate
# sz_top, sz_bot = strike_zone_top, strike_zone_bottom
# vx0, vy0, vz0 = velocity_in_x, velocity_in_y, velocity_in_z
# ax, ay, az = acceleration_in_x, acceleration_in_y, acceleration_in_z

# checking the summary stats of a few columns
df[["sz_top", "sz_bot", "plate_x", "plate_z", "pfx_x", "pfx_z"]].describe().T

# figuring out what zones mean
df.groupby("zone").agg({"plate_x": ["min", "max"], "plate_z": ["min", "max"]})

# looking at the swing % over game year and month
# looks like it is increasing - build a linear model perhaps
df.groupby(["game_year", "month"])["swing"].mean().plot()

df["batter_swing_prob"] = df.groupby("batter", group_keys=False)["swing"].apply(
    update_beta
)
# group_keys=False keeps the original row index so the assignment aligns
df["pitcher_swing_prob"] = df.groupby("pitcher", group_keys=False)["swing"].apply(
    update_beta
)
df["pitcher_swing_prob"].hist()

# extracted w/permutation importance
important_features = [
    "total_strike_zone", "sz_bot", "sz_top", "zone", "attack_zone",
    "plate_x", "count", "pfx_x", "pfx_z", "plate_z", "game_type",
    "over_plate", "batter_swing_prob", "pitcher_swing_prob",
]

eighty_perc = int(np.ceil(0.8 * len(df)))
X = df[important_features].copy()
y = df["swing"]
X_train, X_test = X.iloc[:eighty_perc].copy(), X.iloc[eighty_perc:].copy()
y_train, y_test = y.iloc[:eighty_perc].copy(), y.iloc[eighty_perc:].copy()

cat_columns = X_train.select_dtypes(exclude="number").columns
cat_indices = [X_train.columns.get_loc(col) for col in cat_columns]

pipeline = Pipeline(
    [
        (
            "encoder",
            OrdinalEncoder(unknown_value=9999999, handle_unknown="use_encoded_value"),
        ),
        (
            "model",
            HistGradientBoostingClassifier(
                learning_rate=0.1,
                max_depth=6,
                min_samples_leaf=4,
                categorical_features=cat_indices,
                random_state=24,
            ),
        ),
    ]
)

# param_grid = {
#     'model__max_depth': [4, 5, 6],
#     'model__min_samples_leaf': [4, 5, 6]
# }
# gridsearch = GridSearchCV(pipeline, param_grid=param_grid, cv=5)
# gridsearch.fit(X_train, y_train)

pipeline.fit(X_train, y_train)
preds = pipeline.predict_proba(X_test)
print("the log loss on the test set is:", log_loss(y_test, preds[:, 1]))

# calculating permutation importance using neg log loss on 10% of the dataset
p = permutation_importance(
    pipeline,
    X_test[important_features],
    y_test,
    n_repeats=5,
    scoring="neg_log_loss",
    max_samples=0.1,
    random_state=44,
)

# converting the permutation importance results to a dataframe
imp_df = pd.DataFrame(
    {
        "feature": X_test[important_features].columns,
        "importance_mean": p["importances_mean"],
        "importance_std": p["importances_std"],
    }
)
# displaying the df sorted by the mean of importances
imp_df.sort_values(by="importance_mean", ascending=False)

# creating a smaller dataset for the shap explainer
X_test_small = X_test[important_features].sample(8000).copy()
# set the tree explainer as the model of the pipeline
explainer = shap.TreeExplainer(pipeline["model"])
# apply the preprocessing to x_test
observations = pipeline["encoder"].transform(X_test_small)
# get Shap values from preprocessed data
shap_values = explainer.shap_values(observations)
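# A tabular complement to the SHAP summary plot below: mean absolute SHAP value per
# feature as a simple global ranking. A sketch; it assumes shap_values is a 2-D array,
# which is what TreeExplainer returns for this binary classifier.
mean_abs_shap = pd.DataFrame(
    {
        "feature": X_test_small.columns,
        "mean_abs_shap": np.abs(shap_values).mean(axis=0),
    }
).sort_values("mean_abs_shap", ascending=False)
print(mean_abs_shap.head(10))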
# plot the feature importance
shap.summary_plot(shap_values, X_test_small)

# i'm satisfied with the method, so training the model on the entire dataset
final_model = Pipeline(
    [
        (
            "encoder",
            OrdinalEncoder(unknown_value=9999999, handle_unknown="use_encoded_value"),
        ),
        (
            "model",
            HistGradientBoostingClassifier(
                learning_rate=0.1,
                max_depth=6,
                min_samples_leaf=4,
                categorical_features=cat_indices,
                random_state=24,
            ),
        ),
    ]
)
final_model.fit(X, y)

# looking at the predicted probabilities on the original dataset
df_preds = final_model.predict_proba(X[important_features])[:, 1]
df["preds"] = df_preds
pd.Series(df_preds).hist()
plt.show()

# getting the beta params for each player - i could update the
# beta params for each row of the test set, but i will not do this
batter_swing_dict = df.groupby("batter")["batter_swing_prob"].last().to_dict()
pitcher_swing_dict = df.groupby("pitcher")["pitcher_swing_prob"].last().to_dict()

# loading test dataframe
test_df = pd.read_csv("test.csv")

# creating the features needed for the model
test_df["over_plate"] = abs(test_df["plate_x"]) <= 0.83
test_df["count"] = test_df["strikes"].astype(str) + "_" + test_df["balls"].astype(str)
test_df["count"] = test_df["count"].astype("category")
test_df["total_strike_zone"] = test_df["sz_top"].sub(test_df["sz_bot"])
test_df["batter_swing_prob"] = [
    batter_swing_dict[batter] if batter in batter_swing_dict else 0.47
    for batter in test_df["batter"]
]
test_df["pitcher_swing_prob"] = [
    pitcher_swing_dict[pitcher] if pitcher in pitcher_swing_dict else 0.47
    for pitcher in test_df["pitcher"]
]

# getting the preds for the test dataset
test_preds = final_model.predict_proba(test_df[important_features])[:, 1]
test_df["swing"] = test_preds

# creating the submission df
sub_df = test_df[["pitch_uid", "swing"]].copy()
# saving the submission df to csv
sub_df.to_csv("submission3.csv", index=False)

# looking at the hist plot of the final preds
# the distribution differs a fair bit, may need to revisit
sub_df["swing"].hist()
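# A quick visual check of the distribution gap noted above: overlay the training
# predictions and the submission predictions. A minimal sketch using the objects
# already in scope (df["preds"] and sub_df["swing"]).
fig, ax = plt.subplots(figsize=(8, 4))
ax.hist(df["preds"], bins=50, alpha=0.5, density=True, label="train preds")
ax.hist(sub_df["swing"], bins=50, alpha=0.5, density=True, label="test preds")
ax.set_xlabel("predicted swing probability")
ax.legend()
plt.show()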
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/556/129556647.ipynb
null
null
[{"Id": 129556647, "ScriptId": 38523773, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4012613, "CreationDate": "05/14/2023 20:01:05", "VersionNumber": 1.0, "Title": "nick_wan_swing", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 305.0, "LinesInsertedFromPrevious": 305.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
false
0
3,852
3
3,852
3,852
129898826
<jupyter_start><jupyter_text>Bank Customer Churn RowNumber—corresponds to the record (row) number and has no effect on the output. CustomerId—contains random values and has no effect on customer leaving the bank. Surname—the surname of a customer has no impact on their decision to leave the bank. CreditScore—can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank. Geography—a customer’s location can affect their decision to leave the bank. Gender—it’s interesting to explore whether gender plays a role in a customer leaving the bank. Age—this is certainly relevant, since older customers are less likely to leave their bank than younger ones. Tenure—refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank. Balance—also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances. NumOfProducts—refers to the number of products that a customer has purchased through the bank. HasCrCard—denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank. IsActiveMember—active customers are less likely to leave the bank. EstimatedSalary—as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries. Exited—whether or not the customer left the bank. Complain—customer has complaint or not. Satisfaction Score—Score provided by the customer for their complaint resolution. Card Type—type of card hold by the customer. Points Earned—the points earned by the customer for using credit card. Acknowledgements As we know, it is much more expensive to sign in a new client than keeping an existing one. It is advantageous for banks to know what leads a client towards the decision to leave the company. Churn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible. 
Kaggle dataset identifier: bank-customer-churn <jupyter_script>import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

df = pd.read_csv("/kaggle/input/bank-customer-churn/Customer-Churn-Records.csv")

import matplotlib.pyplot as plt
import seaborn as sns

df.head()
df.info()
df.isnull().sum()
df.describe()
df.shape

fig, axes = plt.subplots(1, 3, figsize=(16, 5))
idx = 0
for col in ["Geography", "Gender", "Card Type"]:
    sns.histplot(data=df[col], ax=axes[idx])
    axes[idx].set_ylabel("Number of bank customers")
    idx += 1

type_card = pd.pivot_table(
    df,
    index="Card Type",
    values=[
        "CreditScore",
        "Balance",
        "NumOfProducts",
        "EstimatedSalary",
        "Point Earned",
    ],
    aggfunc=np.mean,
).reset_index()
type_card

fig, axes = plt.subplots(1, 5, figsize=(30, 10))
idx = 0
for col in [
    "Balance",
    "CreditScore",
    "EstimatedSalary",
    "NumOfProducts",
    "Point Earned",
]:
    sns.barplot(x=type_card["Card Type"], y=type_card[col], ax=axes[idx], width=0.8)
    axes[idx].set_xticklabels(type_card["Card Type"], rotation=90)
    axes[idx].set_ylabel(f"Average of {col}")
    idx += 1

plt.figure(figsize=(6, 4))
sns.stripplot(df, x="Exited", y="Satisfaction Score", hue="Tenure")

tenure = pd.pivot_table(
    df,
    index=["Tenure", "Satisfaction Score"],
    values="CustomerId",
    columns="Exited",
    aggfunc=len,
).reset_index()
# Exited == 0 means the customer stayed, Exited == 1 means the customer left
tenure.rename(columns={0: "stayed", 1: "left"}, inplace=True)
tenure.head()

fig, axes = plt.subplots(4, 3, figsize=(20, 15))
for i, col in enumerate(set(tenure["Tenure"])):
    idx = i // 3
    idr = i % 3
    temp_tenure = tenure[tenure["Tenure"] == col]
    sns.barplot(
        x=temp_tenure["Satisfaction Score"], y=temp_tenure["left"], ax=axes[idx, idr]
    )
    axes[idx, idr].set_ylabel("Number of customers who left")
    axes[idx, idr].set_title(f"Number of years: {col}")
    axes[idx, idr].set_xlabel("")
fig.delaxes(axes[3, 2])
plt.show()

sns.stripplot(df, x="Exited", y="Balance")

med = df["Balance"].median()
feature = ["Balance", "Exited"]
balance = df[feature]
upper = balance[balance["Balance"] >= med]
upper_1 = upper[upper["Exited"] == 1]["Balance"].count()
upper_0 = upper[upper["Exited"] == 0]["Balance"].count()
lower = balance[balance["Balance"] < med]
lower_1 = lower[lower["Exited"] == 1]["Balance"].count()
lower_0 = lower[lower["Exited"] == 0]["Balance"].count()
plt.pie(
    [upper_1, upper_0, lower_1, lower_0],
    labels=[
        "Above median - left",
        "Above median - stayed",
        "Below median - left",
        "Below median - stayed",
    ],
    autopct="%1.1f%%",
)

# So customers with an above-median balance leave the bank more often than those
# with a below-median balance.
# Even so, 37.5% of all customers hold an above-median balance and have not left.

not_exited = df[df["Exited"] == 0]
not_exited_high = not_exited[not_exited["Balance"] >= med]
not_exited_high

fea = [
    "CreditScore",
    "Age",
    "Tenure",
    "EstimatedSalary",
    "Satisfaction Score",
    "Point Earned",
]
fig, axes = plt.subplots(3, 2, figsize=(15, 9))
for i, col in enumerate(not_exited_high[fea]):
    idx = i // 2
    idr = i % 2
    sns.boxplot(y=not_exited_high[col], ax=axes[idx, idr])
    axes[idx, idr].set_title(col, color="red", size=20)
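# A compact numeric follow-up to the card-type plots above: churn rate per card tier.
# A minimal sketch over the columns already loaded.
churn_by_card = df.groupby("Card Type")["Exited"].mean().sort_values(ascending=False)
print(churn_by_card)  # fraction of customers who left, per card type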
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/898/129898826.ipynb
bank-customer-churn
radheshyamkollipara
[{"Id": 129898826, "ScriptId": 38502330, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14708690, "CreationDate": "05/17/2023 09:38:36", "VersionNumber": 2.0, "Title": "bank chunk", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 102.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 102.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 186310786, "KernelVersionId": 129898826, "SourceDatasetVersionId": 5550559}]
[{"Id": 5550559, "DatasetId": 3197960, "DatasourceVersionId": 5625285, "CreatorUserId": 14862076, "LicenseName": "Other (specified in description)", "CreationDate": "04/28/2023 16:32:01", "VersionNumber": 1.0, "Title": "Bank Customer Churn", "Slug": "bank-customer-churn", "Subtitle": "Bank Customer Data for Customer Churn", "Description": "RowNumber\u2014corresponds to the record (row) number and has no effect on the output.\nCustomerId\u2014contains random values and has no effect on customer leaving the bank.\nSurname\u2014the surname of a customer has no impact on their decision to leave the bank.\nCreditScore\u2014can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank.\nGeography\u2014a customer\u2019s location can affect their decision to leave the bank.\nGender\u2014it\u2019s interesting to explore whether gender plays a role in a customer leaving the bank.\nAge\u2014this is certainly relevant, since older customers are less likely to leave their bank than younger ones.\nTenure\u2014refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank.\nBalance\u2014also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances.\nNumOfProducts\u2014refers to the number of products that a customer has purchased through the bank.\nHasCrCard\u2014denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank.\nIsActiveMember\u2014active customers are less likely to leave the bank.\nEstimatedSalary\u2014as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries.\nExited\u2014whether or not the customer left the bank.\nComplain\u2014customer has complaint or not.\nSatisfaction Score\u2014Score provided by the customer for their complaint resolution.\nCard Type\u2014type of card hold by the customer.\nPoints Earned\u2014the points earned by the customer for using credit card.\n\nAcknowledgements\n\nAs we know, it is much more expensive to sign in a new client than keeping an existing one.\n\nIt is advantageous for banks to know what leads a client towards the decision to leave the company.\n\nChurn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3197960, "CreatorUserId": 14862076, "OwnerUserId": 14862076.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5550559.0, "CurrentDatasourceVersionId": 5625285.0, "ForumId": 3262570, "Type": 2, "CreationDate": "04/28/2023 16:32:01", "LastActivityDate": "04/28/2023", "TotalViews": 39315, "TotalDownloads": 6814, "TotalVotes": 97, "TotalKernels": 52}]
[{"Id": 14862076, "UserName": "radheshyamkollipara", "DisplayName": "Radheshyam Kollipara", "RegisterDate": "04/28/2023", "PerformanceTier": 0}]
false
1
1,157
3
1,658
1,157
129521876
# # *Loading libraries*
# loading libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm.auto import tqdm

tqdm.pandas()

from sklearn import metrics
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import activations

# loading datasets
path_train = "/kaggle/input/icr-identify-age-related-conditions/train.csv"
path_test = "/kaggle/input/icr-identify-age-related-conditions/test.csv"
path_submis = "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
path_greeks = "/kaggle/input/icr-identify-age-related-conditions/greeks.csv"

train = pd.read_csv(path_train).drop(columns="Id")
test = pd.read_csv(path_test).drop(columns="Id")
greeks = pd.read_csv(path_greeks)

train["EJ"] = train["EJ"].map({"A": 0, "B": 1})
test["EJ"] = test["EJ"].map({"A": 0, "B": 1})

# # Exploratory Data Analysis
# shape of each dataset
print(f"Shape of the train data : {train.shape}")
print(f"Shape of the test data : {test.shape}")

# checking for missing values in the train dataset
train_miss = train.isnull().sum()
print("Column Count")
for index, row in train_miss[train_miss > 0].items():
    print(f"{index} {row}")

train.describe().transpose()
train.info()
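# A compact variant of the missing-value report above (a sketch): percentage of
# missing cells per column, largest first.
missing_pct = train.isnull().mean().mul(100).sort_values(ascending=False)
print(missing_pct[missing_pct > 0].round(2))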
# ***We can use visualization techniques to discover missing values. A heatmap is well
# suited for this: each horizontal line marks missing data in a row.***
plt.figure(figsize=(16, 14))
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap="PuBuGn")
plt.show()

# ***There are some common methods for handling missing values in a Pandas DataFrame:
# fillna(), interpolate() and SimpleImputer from sklearn.impute***

# fill missing values with the mean of the column
train_mean_filled = train.copy()
train_mean_filled.fillna(train_mean_filled.mean(), inplace=True)

# correlation coefficient of each column with the target
corr_target = train_mean_filled.corrwith(train_mean_filled["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

# interpolate missing values using polynomial interpolation
# (interpolate() returns a new DataFrame, so the result has to be assigned)
train_interpolate = train.copy()
train_interpolate = train_interpolate.interpolate(method="polynomial", order=5)

# correlation coefficient of each column with the target
corr_target = train_interpolate.corrwith(train_interpolate["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

from sklearn.impute import SimpleImputer

# create an imputer object and fit it to the data
imputer = SimpleImputer(strategy="mean")
imputer.fit(train)

# transform the data and replace missing values
train_imputed = pd.DataFrame(imputer.transform(train), columns=train.columns)

# correlation coefficient of each column with the target
corr_target = train_imputed.corrwith(train_imputed["Class"])[:-1].sort_values(
    ascending=False
)
plt.figure(figsize=(10, 10))
sns.barplot(y=corr_target.index, x=corr_target.values)
plt.show()

corr = train.iloc[:, 1:].corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=(16, 14))
ax = sns.heatmap(
    corr,
    vmin=-1,
    vmax=1,
    center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True,
    mask=mask,
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment="right")

labels = ["Class 0", "Class 1"]
sizes = [train["Class"].tolist().count(0), train["Class"].tolist().count(1)]
explode = (0, 0.1)
fig, ax = plt.subplots()
ax.pie(
    sizes,
    explode=explode,
    labels=labels,
    autopct="%1.2f%%",
    shadow=True,
    startangle=180,
)
plt.show()

# multiple plots with seaborn
for x, y in zip(
    train_mean_filled.iloc[:, :-29].columns.tolist(),
    train_mean_filled.iloc[:, -29:-1].columns.tolist(),
):
    fig, axs = plt.subplots(ncols=3, figsize=(15, 5))
    sns.scatterplot(data=train_mean_filled, x=x, y=y, hue="Class", ax=axs[0])
    sns.rugplot(data=train_mean_filled, x=x, y=y, hue="Class", ax=axs[0])
    sns.histplot(
        data=train_mean_filled, x=x, hue="Class", color="blue", kde=True, ax=axs[1]
    )
    sns.histplot(
        data=train_mean_filled, x=y, hue="Class", color="green", kde=True, ax=axs[2]
    )
    plt.show()

# Condition the regression fit on another variable and represent it using color
plt.figure(figsize=(12, 10), edgecolor="blue", frameon=False)
for x, y in zip(
    train_mean_filled.iloc[:, :-29].columns.tolist(),
    train_mean_filled.iloc[:, -29:-1].columns.tolist(),
):
    sns.lmplot(data=train_mean_filled, x=x, y=y, hue="Class")
    plt.title("Regression plot with " + x + " and " + y + " columns.")
    plt.show()

# **Building models**
from sklearn.model_selection import GridSearchCV, KFold
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# split data into X and y
X = train_mean_filled.iloc[:, :-1]
Y = train_mean_filled.iloc[:, -1]

# split data into train and test sets
seed = 7
test_size = 0.15
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=test_size, random_state=seed
)
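# With an imbalanced target, a stratified split usually gives a more representative
# held-out set; a minimal alternative sketch of the split above (left commented out
# so it does not change the pipeline):
# X_train, X_test, y_train, y_test = train_test_split(
#     X, Y, test_size=test_size, random_state=seed, stratify=Y
# )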
# fit the model on the training data
model = XGBClassifier(
    eval_metric="logloss",
    learning_rate=0.05,
    max_delta_step=6,
    booster="gbtree",
    early_stopping_rounds=15,  # set early stopping rounds in constructor
    n_estimators=500,
)
# XGBClassifier(base_score = 0.5, booster = 'gbtree', colsample_bylevel = 1,
#               colsample_bytree=1, gamma = 0, learning_rate = 0.01, max_delta_step = 6,
#               max_depth=3, min_child_weight=1, missing=None, n_estimators = 500,
#               n_jobs = 1, nthread = None, objective = 'binary:logistic', random_state = 0,
#               reg_alpha = 0, reg_lambda = 1, scale_pos_weight = 23.4, seed = None,
#               silent = True, subsample = 1)
eval_set = [(X_test, y_test)]
model.fit(X_train, y_train, eval_set=eval_set, verbose=True)

# make predictions for the test data
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]

# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))


# ====================================================
# Metric
# ====================================================
def balanced_log_loss(y_true, y_pred):
    y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
    nc = np.bincount(y_true)
    w0, w1 = 1 / (nc[0] / y_true.shape[0]), 1 / (nc[1] / y_true.shape[0])
    balanced_log_loss_score = (
        -w0 / nc[0] * (np.sum(np.where(y_true == 0, 1, 0) * np.log(1 - y_pred)))
        - w1 / nc[1] * (np.sum(np.where(y_true != 0, 1, 0) * np.log(y_pred)))
    ) / (w0 + w1)
    return balanced_log_loss_score


balanced_log_loss(y_test, y_pred)


def predict(X):
    # the original version looped over an undefined ensemble (`xgb` / `models_`);
    # with a single fitted classifier, the class-1 probability is simply:
    return model.predict_proba(X)[:, 1]


predictions = predict(test)
sub = pd.read_csv(path_submis)
sub["class_1"] = predictions
sub["class_0"] = 1 - predictions
sub.to_csv("submission.csv", index=False)
sub.head(3)

# XGBoost
# n_jobs=-1 to allow it to run on all cores
params = {
    "n_estimators": [100, 200, 500, 1000],
    "learning_rate": [0.01, 0.05, 0.1, 0.001],
    "booster": ["gbtree", "gblinear"],
    "gamma": [0, 0.5, 1],
    "reg_alpha": [0, 0.5, 1],
    "reg_lambda": [0.5, 1, 5],
    "base_score": [0.2, 0.5, 1],
}
gs2 = GridSearchCV(
    XGBClassifier(n_jobs=-1), params, n_jobs=-1, cv=KFold(n_splits=3), scoring="roc_auc"
)
gs2.fit(X_train, y_train)
print("Best score:", gs2.best_score_)
print("Best params:", gs2.best_params_)
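# For comparison, one common reference form of a balanced log loss is the plain
# average of the per-class mean log losses; a sketch to sanity-check against the
# implementation above:
def balanced_log_loss_ref(y_true, p):
    y_true = np.asarray(y_true)
    p = np.clip(np.asarray(p, dtype=float), 1e-15, 1 - 1e-15)
    loss0 = -np.log(1 - p[y_true == 0]).mean()
    loss1 = -np.log(p[y_true == 1]).mean()
    return (loss0 + loss1) / 2


print(balanced_log_loss_ref(y_test, predict(X_test)))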
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/521/129521876.ipynb
null
null
[{"Id": 129521876, "ScriptId": 38471728, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9401530, "CreationDate": "05/14/2023 14:02:30", "VersionNumber": 4.0, "Title": "Age-Related Conditions EDA and Classification", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 251.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 251.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,775
0
2,775
2,775
129521109
<jupyter_start><jupyter_text>Biomechanical features of orthopedic patients ### Context **The data have been organized in two different but related classification tasks.** - column_3C_weka.csv (file with three class labels) - The first task consists in classifying patients as belonging to one out of three categories: Normal (100 patients), Disk Hernia (60 patients) or Spondylolisthesis (150 patients). - column_2C_weka.csv (file with two class labels) - For the second task, the categories Disk Hernia and Spondylolisthesis were merged into a single category labelled as 'abnormal'. Thus, the second task consists in classifying patients as belonging to one out of two categories: Normal (100 patients) or Abnormal (210 patients). ### Content Field Descriptions: Each patient is represented in the data set by six biomechanical attributes derived from the shape and orientation of the pelvis and lumbar spine (each one is a column): - pelvic incidence - pelvic tilt - lumbar lordosis angle - sacral slope - pelvic radius - grade of spondylolisthesis Kaggle dataset identifier: biomechanical-features-of-orthopedic-patients <jupyter_script># Alternative colab link: https://colab.research.google.com/drive/1CEv0anIpUnr9hM_dNOGL25J2hzFC1_UJ?usp=sharing import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.svm import SVC # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory # import warnings import warnings # ignore warnings warnings.filterwarnings("ignore") from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) # Any results you write to the current directory are saved as output. # read csv (comma separated value) into data data = pd.read_csv("../input/column_2C_weka.csv") print(plt.style.available) # look at available plot styles plt.style.use("ggplot") # to see features and target variable data.head() # Well know question is is there any NaN value and length of this data so lets look at info data.info() data.describe() # pd.plotting.scatter_matrix: # * green: *normal* and red: *abnormal* # * c: color # * figsize: figure size # * diagonal: histohram of each features # * alpha: opacity # * s: size of marker # * marker: marker type color_list = ["red" if i == "Abnormal" else "green" for i in data.loc[:, "class"]] pd.plotting.scatter_matrix( data.loc[:, data.columns != "class"], c=color_list, figsize=[15, 15], diagonal="hist", alpha=0.5, s=200, marker="*", edgecolor="black", ) plt.show() # Okay, as you understand in scatter matrix there are relations between each feature but how many *normal(green)* and *abnormal(red)* classes are there. # * Searborn library has *countplot()* that counts number of classes # * Also you can print it with *value_counts()* method # This data looks like balanced. Actually there is no definiton or numeric value of balanced data but this data is balanced enough for us. 
# Let's look at the class counts, then move on to building the classifier.
sns.countplot(x="class", data=data)
data.loc[:, "class"].value_counts()

from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
import random

newdt = data["class"]
newdt = newdt.replace("Abnormal", 0)
newdt = newdt.replace("Normal", 1)
features = [
    "pelvic_incidence",
    "pelvic_tilt numeric",
    "lumbar_lordosis_angle",
    "sacral_slope",
    "pelvic_radius",
]
X = data.loc[:, data.columns != "class"]
y = newdt
print(X.head())
print(y.head())

# GA to SVM
# **Algorithm Steps**
# 1. Initialize parameters: a target function (a.k.a. fitness function), a region of interest.
# 2. Generate a random population with n elements.
# 3. Apply genetic algorithm operators to the population: crossover & mutation -> create a new population.
# 4. Create a temporary population: concatenate the population with the new population.
# 5. Calculate the fitness function for each element of the temporary population.
# 6. Sort the temporary population elements by their fitness function.
# 7. Select the n best elements from the temporary population. If the stopping criteria are not met -> return to step 3. Else -> choose the 1st element of the population; it will be our solution.

## !!! "Precomputed kernel parameter: Precomputed matrix must be a square matrix."
# kernel = ["linear", "rbf", "sigmoid", "precomputed"]
kernel = ["linear", "poly", "rbf", "sigmoid"]
degrees = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
gamma = [1, 10, 100, 1000]
# c = [1, 10, 100, 200, 300, 400, 600, 800, 1000]
# gamma = [0.0001, 0.001, 0.01, 0.1, 1, 10]
c = [0.0001, 0.001, 0.01, 0.1, 1, 10, 20, 30]
random_state = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]


# The init_population function creates a random population.
def init_population(size):
    population = []
    for i in range(size):
        chromosome = np.ones(5)
        chromosome = [
            kernel[random.randint(0, len(kernel) - 1)],
            degrees[random.randint(0, len(degrees) - 1)],
            gamma[random.randint(0, len(gamma) - 1)],
            c[random.randint(0, len(c) - 1)],
            random_state[random.randint(0, len(random_state) - 1)],
        ]
        population.append(chromosome)
    print("[init_population]: population: ", population)
    return population


# SVC training/evaluation helper
def compute_acc(X, y, kernel, degres, gamma, c, random_state):
    clf = SVC(
        kernel=str(kernel),
        degree=int(degres),
        gamma=int(gamma),
        C=float(c),
        random_state=int(random_state),
    )
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=666
    )
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    acc = accuracy_score(y_test, y_pred)
    return acc


# Function that returns the fitness value for every chromosome in a population.
# (The original version called compute_acc() with no arguments; it is fixed here
# to evaluate each chromosome.)
def acc_score(X, y, population):
    Score = pd.DataFrame({"Parameter": population})
    acc = []
    for chromosome in population:
        acc.append(
            compute_acc(
                X,
                y,
                kernel=str(chromosome[0]),
                degres=int(chromosome[1]),
                gamma=int(chromosome[2]),
                c=float(chromosome[3]),
                random_state=int(chromosome[4]),
            )
        )
    Score["Accuracy"] = acc
    Score.sort_values(by="Accuracy", ascending=False, inplace=True)
    Score.reset_index(drop=True, inplace=True)
    return Score
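# Example use of the helper above (a sketch): score one hand-picked configuration
# before letting the GA search the space. The parameter names (including the
# original `degres` spelling) match compute_acc's signature.
print(compute_acc(X, y, kernel="rbf", degres=3, gamma=1, c=1.0, random_state=42))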
# fitness_score returns the best parents together with their fitness scores.
def fitness_score(X, y, population):
    scores = []
    for chromosome in population:
        result = compute_acc(
            X,
            y,
            kernel=str(chromosome[0]),
            degres=int(chromosome[1]),
            gamma=int(chromosome[2]),
            c=float(chromosome[3]),
            random_state=int(chromosome[4]),
        )
        scores.append(result)
    scores, population = np.array(scores), np.array(population, dtype=object)
    inds = np.argsort(scores)
    return list(scores[inds][::-1]), list(population[inds, :][::-1])


# The selection function picks the best parents.
def selection(pop_after_fit, n_parents):
    population_nextgen = []
    for i in range(n_parents):
        population_nextgen.append(pop_after_fit[i])
    return population_nextgen


# crossover swaps genes between the first and the second parent.
def crossover(pop_after_sel):
    pop_nextgen = pop_after_sel
    for i in range(0, len(pop_after_sel), 2):
        new_par = []
        child_1, child_2 = pop_nextgen[i], pop_nextgen[i + 1]
        new_par = np.concatenate(
            (child_1[: len(child_1) // 2], child_2[len(child_1) // 2 :])
        )
        pop_nextgen.append(new_par)
    return pop_nextgen


# Helper that shuffles genes between chromosomes for mutation purposes.
def shuffle(arr, mutation_rate):
    temp = 0
    arr = np.array(arr, dtype=object)
    shapes = arr.shape
    for i in range(mutation_rate):
        rand_arr_1, rnd_arr_in_1 = (
            random.randint(0, shapes[0]) - 1,
            random.randint(0, shapes[1]) - 1,
        )
        rand_arr_2 = random.randint(0, shapes[0]) - 1
        temp = arr[rand_arr_1][rnd_arr_in_1]
        arr[rand_arr_1][rnd_arr_in_1] = arr[rand_arr_2][rnd_arr_in_1]
        arr[rand_arr_2][rnd_arr_in_1] = temp
    return arr


# mutation mutates randomly selected genes by swapping them at random positions.
def mutation(pop_after_cross, mutation_rate, n_feat):
    pop_next = shuffle(arr=pop_after_cross, mutation_rate=mutation_rate)
    return pop_next


# generations runs all of the functions above for the given number of generations.
# size: population size
# n_feat: number of genes per chromosome; this population uses 5
# n_parents: number of parents kept at selection; set equal to size here
# mutation_rate: number of random gene swaps per generation
# n_gen: number of generations to run
def generations(X, y, size, n_feat, n_parents, mutation_rate, n_gen):
    best_chromosom = []
    best_score = []
    population_nextgen = init_population(size)
    for i in range(n_gen):
        scores, pop_after_fit = fitness_score(X, y, population_nextgen)
        print("[generations] Best score in generation", i + 1, ":", scores[:1])
        pop_after_sel = selection(pop_after_fit, n_parents)
        pop_after_cross = crossover(pop_after_sel)
        population_nextgen = mutation(pop_after_cross, mutation_rate, n_feat)
        best_chromosom.append(pop_after_fit[0])
        best_score.append(scores[0])
        print("[generations] Scores: ", scores, " pop_after_fit: ", pop_after_fit)
    return best_chromosom, best_score


from sklearn.svm import SVC

chromo_df_pd, score_pd = generations(
    X, y, size=5, n_feat=5, n_parents=5, mutation_rate=20, n_gen=20
)
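# A small follow-up sketch: plot how the best accuracy evolves across generations,
# using the two lists returned by generations() above.
plt.figure(figsize=(8, 4))
plt.plot(range(1, len(score_pd) + 1), score_pd, marker="o")
plt.xlabel("generation")
plt.ylabel("best accuracy")
plt.title("GA search progress")
plt.show()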
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/521/129521109.ipynb
biomechanical-features-of-orthopedic-patients
null
[{"Id": 129521109, "ScriptId": 37558720, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6886723, "CreationDate": "05/14/2023 13:55:49", "VersionNumber": 8.0, "Title": "Genetic Algorithm to SVM", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 221.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 217.0, "LinesInsertedFromFork": 145.0, "LinesDeletedFromFork": 744.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 76.0, "TotalVotes": 0}]
[{"Id": 185663327, "KernelVersionId": 129521109, "SourceDatasetVersionId": 3987}]
[{"Id": 3987, "DatasetId": 2374, "DatasourceVersionId": 3987, "CreatorUserId": 484516, "LicenseName": "CC0: Public Domain", "CreationDate": "09/06/2017 22:54:11", "VersionNumber": 1.0, "Title": "Biomechanical features of orthopedic patients", "Slug": "biomechanical-features-of-orthopedic-patients", "Subtitle": "Classifying patients based on six features", "Description": "### Context\n\n**The data have been organized in two different but related classification tasks.** \n\n- column_3C_weka.csv (file with three class labels)\n - The first task consists in classifying patients as belonging to one out of three categories: Normal (100 patients), Disk Hernia (60 patients) or Spondylolisthesis (150 patients). \n\n- column_2C_weka.csv (file with two class labels)\n - For the second task, the categories Disk Hernia and Spondylolisthesis were merged into a single category labelled as 'abnormal'. Thus, the second task consists in classifying patients as belonging to one out of two categories: Normal (100 patients) or Abnormal (210 patients).\n\n### Content\n\nField Descriptions:\n\nEach patient is represented in the data set by six biomechanical attributes derived from the shape and orientation of the pelvis and lumbar spine (each one is a column): \n\n- pelvic incidence\n- pelvic tilt\n- lumbar lordosis angle\n- sacral slope\n- pelvic radius\n- grade of spondylolisthesis\n\n### Acknowledgements\n\nThe original dataset was downloaded from UCI ML repository:\n\nLichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science\n\nFiles were converted to CSV\n\n### Inspiration\n\nUse these biomechanical features to classify patients according to their labels", "VersionNotes": "Initial release", "TotalCompressedBytes": 51144.0, "TotalUncompressedBytes": 51144.0}]
[{"Id": 2374, "CreatorUserId": 484516, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 3987.0, "CurrentDatasourceVersionId": 3987.0, "ForumId": 6351, "Type": 2, "CreationDate": "09/06/2017 22:54:11", "LastActivityDate": "02/05/2018", "TotalViews": 92149, "TotalDownloads": 18837, "TotalVotes": 228, "TotalKernels": 446}]
null
# Alternative colab link: https://colab.research.google.com/drive/1CEv0anIpUnr9hM_dNOGL25J2hzFC1_UJ?usp=sharing
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import SVC

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# import warnings
import warnings

# ignore warnings
warnings.filterwarnings("ignore")
from subprocess import check_output

print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# read csv (comma separated value) into data
data = pd.read_csv("../input/column_2C_weka.csv")
print(plt.style.available)  # look at available plot styles
plt.style.use("ggplot")
# to see features and target variable
data.head()
# A well-known first question: are there any NaN values, and how large is this data? Let's look at info
data.info()
data.describe()
# pd.plotting.scatter_matrix:
# * green: *normal* and red: *abnormal*
# * c: color
# * figsize: figure size
# * diagonal: histogram of each feature
# * alpha: opacity
# * s: size of marker
# * marker: marker type
color_list = ["red" if i == "Abnormal" else "green" for i in data.loc[:, "class"]]
pd.plotting.scatter_matrix(
    data.loc[:, data.columns != "class"],
    c=color_list,
    figsize=[15, 15],
    diagonal="hist",
    alpha=0.5,
    s=200,
    marker="*",
    edgecolor="black",
)
plt.show()
# Okay, as the scatter matrix shows, there are relations between the features, but how many *normal (green)* and *abnormal (red)* samples are there?
# * The seaborn library has *countplot()*, which counts the number of samples per class
# * You can also print the counts with the *value_counts()* method
# This data looks balanced. Actually, there is no strict definition or numeric threshold for balanced data, but this data is balanced enough for us.
# Now let's learn our first classification method, KNN
sns.countplot(x="class", data=data)
data.loc[:, "class"].value_counts()
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
import random

newdt = data["class"]
newdt = newdt.replace("Abnormal", 0)
newdt = newdt.replace("Normal", 1)
features = [
    "pelvic_incidence",
    "pelvic_tilt numeric",
    "lumbar_lordosis_angle",
    "sacral_slope",
    "pelvic_radius",
]
X = data.loc[:, data.columns != "class"]
y = newdt
print(X.head())
print(y.head())
# GA to SVM
# **Algorithm Steps**
# 1. Initialize parameters: a target function (a.k.a. fitness function), a region of interest.
# 2. Generate a random population with n elements.
# 3. Apply genetic algorithm operators to the population: crossover & mutation -> create a new population.
# 4. Create a temporary population: concatenate the population with the new population.
# 5. Calculate the fitness function for each element of the temporary population.
# 6. Sort the temporary population elements by their fitness function.
# 7. Select the n best elements from the temporary population. If the stopping criteria are not met -> return to step 3. Else -> choose the 1st element of the population; it is our solution.
## !!! "Precomputed kernel parameter: Precomputed matrix must be a square matrix."
# kernel = ["linear", "rbf", "sigmoid", "precomputed"] kernel = ["linear", "poly", "rbf", "sigmoid"] degrees = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] gamma = [1, 10, 100, 1000] # c = [1, 10, 100, 200, 300, 400,600,800, 1000] # gamma = [0.0001, 0.001, 0.01, 0.1, 1, 10] c = [0.0001, 0.001, 0.01, 0.1, 1, 10, 20, 30] random_state = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] # init_population Fonksiyonu random olarak bir popülasyon oluşturmaya yaramaktadır. def init_population(size): population = [] for i in range(size): chromosome = np.ones(5) chromosome = [ kernel[random.randint(0, len(kernel) - 1)], degrees[random.randint(0, len(degrees) - 1)], gamma[random.randint(0, len(gamma) - 1)], c[random.randint(0, len(c) - 1)], random_state[random.randint(0, len(random_state) - 1)], ] population.append(chromosome) print("[init_population]: population: ", population) return population # SVC Hesaplama metotu def compute_acc(X, y, kernel, degres, gamma, c, random_state): clf = SVC( kernel=str(kernel), degree=int(degres), gamma=int(gamma), C=float(c), random_state=int(random_state), ) x_train, x_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=666 ) clf.fit(x_train, y_train) y_pred = clf.predict(x_test) acc = accuracy_score(y_test, y_pred) return acc # Bütün sınıflandırıcılar için uygunluk değerini döndürmeye yarayan fonksiyondur. def acc_score(population): Score = pd.DataFrame({"Parameter": population}) acc = [] acc.append(compute_acc()) Score["Accuracy"] = acc Score.sort_values(by="Accuracy", ascending=False, inplace=True) Score.reset_index(drop=True, inplace=True) return Score # fitness_score En iyi ebeveynleri uygunluk skorlarıyla birlikte döndürmektedir. def fitness_score(X, y, population): scores = [] for chromosome in population: result = compute_acc( X, y, kernel=str(chromosome[0]), degres=int(chromosome[1]), gamma=int(chromosome[2]), c=float(chromosome[3]), random_state=int(chromosome[4]), ) scores.append(result) scores, population = np.array(scores), np.array(population, dtype=object) inds = np.argsort(scores) return list(scores[inds][::-1]), list(population[inds, :][::-1]) # selection fonksiyonu en iyi ebeveynlerin seçilmesini sağlamaktadır def selection(pop_after_fit, n_parents): population_nextgen = [] for i in range(n_parents): population_nextgen.append(pop_after_fit[i]) return population_nextgen # crossover birinci ebeveyn ile ikinci ebeveyn arasında gen çaprazlanmasını sağlamaktadır def crossover(pop_after_sel): pop_nextgen = pop_after_sel for i in range(0, len(pop_after_sel), 2): new_par = [] child_1, child_2 = pop_nextgen[i], pop_nextgen[i + 1] new_par = np.concatenate( (child_1[: len(child_1) // 2], child_2[len(child_1) // 2 :]) ) pop_nextgen.append(new_par) return pop_nextgen # Mutasyon amacıyla verileri karıştırma metotu def shuffle(arr, mutation_rate): temp = 0 arr = np.array(arr, dtype=object) shapes = arr.shape for i in range(mutation_rate): rand_arr_1, rnd_arr_in_1 = ( random.randint(0, shapes[0]) - 1, random.randint(0, shapes[1]) - 1, ) rand_arr_2 = random.randint(0, shapes[0]) - 1 temp = arr[rand_arr_1][rnd_arr_in_1] arr[rand_arr_1][rnd_arr_in_1] = arr[rand_arr_2][rnd_arr_in_1] arr[rand_arr_2][rnd_arr_in_1] = temp return arr # mutation random bir şekilde seçilen bit ya da gen üzerinde mutasyon sağlamakta # ve onları rastgele çevirmektedir. 
def mutation(pop_after_cross, mutation_rate, n_feat):
    pop_next = shuffle(arr=pop_after_cross, mutation_rate=mutation_rate)
    return pop_next


# generations runs all of the functions above for the given number of generations.
# n_feat: number of genes per chromosome (5 for this population)
# n_parents: number of parents kept during selection; keeping it equal to size is convenient
# mutation_rate: number of gene swaps performed per generation
# n_gen: number of generations to run
def generations(X, y, size, n_feat, n_parents, mutation_rate, n_gen):
    best_chromosom = []
    best_score = []
    population_nextgen = init_population(size)
    for i in range(n_gen):
        scores, pop_after_fit = fitness_score(X, y, population_nextgen)
        print("[generations] Best score in generation", i + 1, ":", scores[:1])
        pop_after_sel = selection(pop_after_fit, n_parents)
        pop_after_cross = crossover(pop_after_sel)
        population_nextgen = mutation(pop_after_cross, mutation_rate, n_feat)
        best_chromosom.append(pop_after_fit[0])
        best_score.append(scores[0])
    print("[generations] Scores: ", scores, " pop_after_fit: ", pop_after_fit)
    return best_chromosom, best_score


from sklearn.svm import SVC

chromo_df_pd, score_pd = generations(
    X, y, size=5, n_feat=5, n_parents=5, mutation_rate=20, n_gen=20
)
false
0
2,818
0
3,127
2,818
129521112
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import torch.nn as nn
import torch
import torch.optim as optim
from PIL import Image
from matplotlib import pyplot as plt
from torchvision import transforms, models
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from torch.utils.data import ConcatDataset

# # EDA
# ## Load Data
TRAIN_PATH = "../working/train"
TEST_PATH = "../working/test1"
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
os.listdir("../input/dogs-vs-cats/")
labels_list = []
train_imgs = os.listdir("../working/train")
for f in train_imgs:
    cat_or_dog = f.split(".")[0]
    if cat_or_dog == "cat":
        labels_list.append(0)
    elif cat_or_dog == "dog":
        labels_list.append(1)
print(f"Images count: {len(train_imgs)}, Labels count: {len(labels_list)}")
df_train = pd.DataFrame(columns=["image", "label"])
df_train["image"] = train_imgs
df_train["label"] = labels_list
df_train.head(5)
ax = sns.countplot(data=df_train, x="label")
ax.set_title("Count of labels")
shape_arr = []
for i in range(0, df_train.shape[0]):
    img_path = os.path.join(TRAIN_PATH, df_train["image"][i])
    with Image.open(img_path) as img:
        width, height = img.size
        shape_arr.append((width, height))
img_path = os.path.join(TRAIN_PATH, df_train["image"][1])
with Image.open(img_path) as img:
    plt.imshow(img)
# ## Data Augmentation
# We augment the training set with transformed copies (flips and rotations) to improve generalization.
X_train, X_test, y_train, y_test = train_test_split(
    df_train["image"], df_train["label"], test_size=0.15, stratify=df_train["label"]
)
print(f"Train size = {X_train.shape[0]}, Test size = {X_test.shape[0]}")
# ### Normalize Transform
# Normalize the data. Computing this dataset's own mean and standard deviation can be time-consuming, so we use the standard ImageNet statistics as representative values.
normalize_transform = transforms.Compose( [ transforms.Resize((128, 128)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) # ### Original Data class DogsCatsDataset(Dataset): def __init__(self, x, y, submission=False): self.x = x.reset_index() self.y = y.reset_index() self.submission = submission def __len__(self): return self.x.shape[0] def load_image(self, path): prefix = TRAIN_PATH if self.submission: prefix = TEST_PATH return Image.open(os.path.join(prefix, path["image"])) def __getitem__(self, index): img = self.load_image(self.x.iloc[index]) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample train_data = DogsCatsDataset(X_train, y_train) # ### Horizontal Flip class HorizontalFlipTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomHorizontalFlip(p=1) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample horizontal_flip_transformed_train_data = HorizontalFlipTransformedDogsCatsDataset( X_train, y_train ) # ### Vertical Flip class VerticalFlipTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomVerticalFlip(p=1) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample vertical_flip_transformed_train_data = VerticalFlipTransformedDogsCatsDataset( X_train, y_train ) # ### Random Rotation class RotationTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomRotation(360) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample rotation_transformed_train_data = RotationTransformedDogsCatsDataset(X_train, y_train) # ### AutoAugment class AutoAugmentTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomRotation(360) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample auto_augment_transformed_train_data = AutoAugmentTransformedDogsCatsDataset( X_train, y_train ) whole_data = ConcatDataset( [ train_data, horizontal_flip_transformed_train_data, vertical_flip_transformed_train_data, rotation_transformed_train_data, auto_augment_transformed_train_data, ] ) print(len(whole_data)) # ## Create train/test Dataset & DataLoader test_data = DogsCatsDataset(X_test, y_test) train_loader = DataLoader(train_data, 
batch_size=32, drop_last=True, shuffle=True)
test_loader = DataLoader(test_data, batch_size=32, drop_last=True, shuffle=True)
for i, data in enumerate(train_loader):
    print(data["image"].shape)
    print(data["label"].shape)
    break


# ## Implement CNN Model
# Three convolution layers followed by three fully connected layers, with max pooling after every convolution block.
class ConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.cv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.batch_norm1 = nn.BatchNorm2d(num_features=32)
        self.relu1 = nn.ReLU()
        self.max_pool1 = nn.MaxPool2d(2)
        self.cv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.batch_norm2 = nn.BatchNorm2d(num_features=64)
        self.relu2 = nn.ReLU()
        self.max_pool2 = nn.MaxPool2d(2)
        self.cv3 = nn.Conv2d(in_channels=64, out_channels=80, kernel_size=3)
        self.batch_norm3 = nn.BatchNorm2d(num_features=80)
        self.relu3 = nn.ReLU()
        self.max_pool3 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(15680, 2000)
        self.fc_relu1 = nn.ReLU()
        self.fc2 = nn.Linear(2000, 500)
        self.fc_relu2 = nn.ReLU()
        self.fc3 = nn.Linear(500, 2)

    def forward(self, x):
        x = self.max_pool1(self.relu1(self.batch_norm1(self.cv1(x))))
        x = self.max_pool2(self.relu2(self.batch_norm2(self.cv2(x))))
        x = self.max_pool3(self.relu3(self.batch_norm3(self.cv3(x))))
        x = x.view(x.size(0), -1)
        x = self.fc_relu1(self.fc1(x))
        x = self.fc_relu2(self.fc2(x))
        x = self.fc3(x)
        return x


net = ConvNet()
print(net)
# ## Let's train
# Set epochs to 10.
n_epochs = 10
net.train()
net.to(DEVICE)
total_loss = 0
loss_list = []
acc_list = []
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
for epoch in range(n_epochs):
    for batch_no, data in enumerate(train_loader, 0):
        samples, labels = data["image"], data["label"]
        samples = samples.to(DEVICE)
        labels = labels.to(DEVICE)
        optimizer.zero_grad()
        output = net(samples)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        if batch_no % 100 == 0:
            pred = torch.argmax(output, dim=1)
            correct = pred.eq(labels)
            acc = torch.mean(correct.float())
            acc = acc.to("cpu")
            # total_loss accumulates over the (at most) 100 batches since the last print
            print(
                f"[Epoch {epoch}/{n_epochs}] Batch {batch_no} -> Train Loss: {total_loss/100}, Accuracy: {acc}"
            )
            loss_list.append(total_loss / 100)
            acc_list.append(acc)
            total_loss = 0
plt.plot(loss_list[1:], label="loss")
plt.plot(acc_list[1:], label="accuracy")
plt.legend()
plt.show()
# ## Test
# Evaluate the model on the test split we made earlier.
net.eval()
loss_list = []
acc_list = []
loss = 0
with torch.no_grad():
    for batch_no, data in enumerate(test_loader, 0):
        samples, labels = data["image"], data["label"]
        samples = samples.to(DEVICE)
        labels = labels.to(DEVICE)
        output = net(samples)
        loss = criterion(output, labels)
        pred = torch.argmax(output, dim=1)
        correct = pred.eq(labels)
        acc = torch.mean(correct.float())
        acc = acc.to("cpu")
        acc_list.append(acc)
        loss = loss.to("cpu")
        loss_list.append(loss)
print(f"Mean acc = {np.mean(acc_list)}. Mean loss = {np.mean(loss_list)}")
# ## Prediction
# Try prediction on the real (unlabeled) test data.
test_imgs = os.listdir(TEST_PATH) label_list = [0] * len(test_imgs) test_ids = [] for fname in test_imgs: test_ids.append(fname.split(".")[0]) print(f"Image count: {len(test_ids)}\nLabel count: {len(label_list)}") test_df = pd.DataFrame(columns=["image", "label"]) test_df["image"] = test_imgs test_df["label"] = label_list test_data = DogsCatsDataset(test_df["image"], test_df["label"], submission=True) test_loader = DataLoader(test_data, batch_size=32) net.eval() net.to(DEVICE) preds = torch.Tensor() preds = preds.to(DEVICE) predictions_frame = pd.DataFrame(columns=["ids", "preds"]) predictions_frame["ids"] = test_ids with torch.no_grad(): for batch_no, data in enumerate(test_loader): samples, labels = data["image"], data["label"] samples = samples.to(DEVICE) labels = labels.to(DEVICE) output = net(samples) pred = torch.argmax(output, dim=1) preds = torch.cat((preds, pred), 0) preds = preds.to("cpu") predictions_frame["preds"] = preds predictions_frame.to_csv(f"submission.csv", header=False, index=False)
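# Two caveats worth noting as an editorial sketch, not changes to the notebook
# above: `whole_data` (the augmented ConcatDataset) is built but never passed to
# `train_loader`, and the class named AutoAugmentTransformedDogsCatsDataset
# actually applies RandomRotation rather than torchvision's AutoAugment policy.
# Also, `preds` is a float tensor, so the CSV above ends up with values like
# "1.0" and, with header=False, no column names. A minimal fix for the export,
# assuming `predictions_frame` as defined above:
predictions_frame["preds"] = predictions_frame["preds"].astype(int)
predictions_frame.rename(columns={"ids": "id", "preds": "label"}).to_csv(
    "submission.csv", index=False
)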
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/521/129521112.ipynb
null
null
[{"Id": 129521112, "ScriptId": 38308699, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13713274, "CreationDate": "05/14/2023 13:55:52", "VersionNumber": 3.0, "Title": "Dogs vs cats with CNN", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 396.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 395.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import torch.nn as nn
import torch
import torch.optim as optim
from PIL import Image
from matplotlib import pyplot as plt
from torchvision import transforms, models
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from torch.utils.data import ConcatDataset

# # EDA
# ## Load Data
TRAIN_PATH = "../working/train"
TEST_PATH = "../working/test1"
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
os.listdir("../input/dogs-vs-cats/")
labels_list = []
train_imgs = os.listdir("../working/train")
for f in train_imgs:
    cat_or_dog = f.split(".")[0]
    if cat_or_dog == "cat":
        labels_list.append(0)
    elif cat_or_dog == "dog":
        labels_list.append(1)
print(f"Images count: {len(train_imgs)}, Labels count: {len(labels_list)}")
df_train = pd.DataFrame(columns=["image", "label"])
df_train["image"] = train_imgs
df_train["label"] = labels_list
df_train.head(5)
ax = sns.countplot(data=df_train, x="label")
ax.set_title("Count of labels")
shape_arr = []
for i in range(0, df_train.shape[0]):
    img_path = os.path.join(TRAIN_PATH, df_train["image"][i])
    with Image.open(img_path) as img:
        width, height = img.size
        shape_arr.append((width, height))
img_path = os.path.join(TRAIN_PATH, df_train["image"][1])
with Image.open(img_path) as img:
    plt.imshow(img)
# ## Data Augmentation
# We augment the training set with transformed copies (flips and rotations) to improve generalization.
X_train, X_test, y_train, y_test = train_test_split(
    df_train["image"], df_train["label"], test_size=0.15, stratify=df_train["label"]
)
print(f"Train size = {X_train.shape[0]}, Test size = {X_test.shape[0]}")
# ### Normalize Transform
# Normalize the data. Computing this dataset's own mean and standard deviation can be time-consuming, so we use the standard ImageNet statistics as representative values.
normalize_transform = transforms.Compose( [ transforms.Resize((128, 128)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) # ### Original Data class DogsCatsDataset(Dataset): def __init__(self, x, y, submission=False): self.x = x.reset_index() self.y = y.reset_index() self.submission = submission def __len__(self): return self.x.shape[0] def load_image(self, path): prefix = TRAIN_PATH if self.submission: prefix = TEST_PATH return Image.open(os.path.join(prefix, path["image"])) def __getitem__(self, index): img = self.load_image(self.x.iloc[index]) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample train_data = DogsCatsDataset(X_train, y_train) # ### Horizontal Flip class HorizontalFlipTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomHorizontalFlip(p=1) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample horizontal_flip_transformed_train_data = HorizontalFlipTransformedDogsCatsDataset( X_train, y_train ) # ### Vertical Flip class VerticalFlipTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomVerticalFlip(p=1) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample vertical_flip_transformed_train_data = VerticalFlipTransformedDogsCatsDataset( X_train, y_train ) # ### Random Rotation class RotationTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomRotation(360) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample rotation_transformed_train_data = RotationTransformedDogsCatsDataset(X_train, y_train) # ### AutoAugment class AutoAugmentTransformedDogsCatsDataset(Dataset): def __init__(self, x, y): self.x = x.reset_index() self.y = y.reset_index() self.transform = transforms.RandomRotation(360) def __len__(self): return self.x.shape[0] def __getitem__(self, index): img_path = os.path.join(TRAIN_PATH, self.x.iloc[index]["image"]) img = Image.open(img_path) img = self.transform(img) label = self.y.iloc[index]["label"] img = normalize_transform(img) sample = {"image": img, "label": label} return sample auto_augment_transformed_train_data = AutoAugmentTransformedDogsCatsDataset( X_train, y_train ) whole_data = ConcatDataset( [ train_data, horizontal_flip_transformed_train_data, vertical_flip_transformed_train_data, rotation_transformed_train_data, auto_augment_transformed_train_data, ] ) print(len(whole_data)) # ## Create train/test Dataset & DataLoader test_data = DogsCatsDataset(X_test, y_test) train_loader = DataLoader(train_data, 
batch_size=32, drop_last=True, shuffle=True)
test_loader = DataLoader(test_data, batch_size=32, drop_last=True, shuffle=True)
for i, data in enumerate(train_loader):
    print(data["image"].shape)
    print(data["label"].shape)
    break


# ## Implement CNN Model
# Three convolution layers followed by three fully connected layers, with max pooling after every convolution block.
class ConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.cv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.batch_norm1 = nn.BatchNorm2d(num_features=32)
        self.relu1 = nn.ReLU()
        self.max_pool1 = nn.MaxPool2d(2)
        self.cv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
        self.batch_norm2 = nn.BatchNorm2d(num_features=64)
        self.relu2 = nn.ReLU()
        self.max_pool2 = nn.MaxPool2d(2)
        self.cv3 = nn.Conv2d(in_channels=64, out_channels=80, kernel_size=3)
        self.batch_norm3 = nn.BatchNorm2d(num_features=80)
        self.relu3 = nn.ReLU()
        self.max_pool3 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(15680, 2000)
        self.fc_relu1 = nn.ReLU()
        self.fc2 = nn.Linear(2000, 500)
        self.fc_relu2 = nn.ReLU()
        self.fc3 = nn.Linear(500, 2)

    def forward(self, x):
        x = self.max_pool1(self.relu1(self.batch_norm1(self.cv1(x))))
        x = self.max_pool2(self.relu2(self.batch_norm2(self.cv2(x))))
        x = self.max_pool3(self.relu3(self.batch_norm3(self.cv3(x))))
        x = x.view(x.size(0), -1)
        x = self.fc_relu1(self.fc1(x))
        x = self.fc_relu2(self.fc2(x))
        x = self.fc3(x)
        return x


net = ConvNet()
print(net)
# ## Let's train
# Set epochs to 10.
n_epochs = 10
net.train()
net.to(DEVICE)
total_loss = 0
loss_list = []
acc_list = []
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
for epoch in range(n_epochs):
    for batch_no, data in enumerate(train_loader, 0):
        samples, labels = data["image"], data["label"]
        samples = samples.to(DEVICE)
        labels = labels.to(DEVICE)
        optimizer.zero_grad()
        output = net(samples)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        if batch_no % 100 == 0:
            pred = torch.argmax(output, dim=1)
            correct = pred.eq(labels)
            acc = torch.mean(correct.float())
            acc = acc.to("cpu")
            # total_loss accumulates over the (at most) 100 batches since the last print
            print(
                f"[Epoch {epoch}/{n_epochs}] Batch {batch_no} -> Train Loss: {total_loss/100}, Accuracy: {acc}"
            )
            loss_list.append(total_loss / 100)
            acc_list.append(acc)
            total_loss = 0
plt.plot(loss_list[1:], label="loss")
plt.plot(acc_list[1:], label="accuracy")
plt.legend()
plt.show()
# ## Test
# Evaluate the model on the test split we made earlier.
net.eval()
loss_list = []
acc_list = []
loss = 0
with torch.no_grad():
    for batch_no, data in enumerate(test_loader, 0):
        samples, labels = data["image"], data["label"]
        samples = samples.to(DEVICE)
        labels = labels.to(DEVICE)
        output = net(samples)
        loss = criterion(output, labels)
        pred = torch.argmax(output, dim=1)
        correct = pred.eq(labels)
        acc = torch.mean(correct.float())
        acc = acc.to("cpu")
        acc_list.append(acc)
        loss = loss.to("cpu")
        loss_list.append(loss)
print(f"Mean acc = {np.mean(acc_list)}. Mean loss = {np.mean(loss_list)}")
# ## Prediction
# Try prediction on the real (unlabeled) test data.
test_imgs = os.listdir(TEST_PATH) label_list = [0] * len(test_imgs) test_ids = [] for fname in test_imgs: test_ids.append(fname.split(".")[0]) print(f"Image count: {len(test_ids)}\nLabel count: {len(label_list)}") test_df = pd.DataFrame(columns=["image", "label"]) test_df["image"] = test_imgs test_df["label"] = label_list test_data = DogsCatsDataset(test_df["image"], test_df["label"], submission=True) test_loader = DataLoader(test_data, batch_size=32) net.eval() net.to(DEVICE) preds = torch.Tensor() preds = preds.to(DEVICE) predictions_frame = pd.DataFrame(columns=["ids", "preds"]) predictions_frame["ids"] = test_ids with torch.no_grad(): for batch_no, data in enumerate(test_loader): samples, labels = data["image"], data["label"] samples = samples.to(DEVICE) labels = labels.to(DEVICE) output = net(samples) pred = torch.argmax(output, dim=1) preds = torch.cat((preds, pred), 0) preds = preds.to("cpu") predictions_frame["preds"] = preds predictions_frame.to_csv(f"submission.csv", header=False, index=False)
false
0
3,545
0
3,545
3,545
129689946
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Overview
# I have always loved dogs and finally adopted my first dog last year during the pandemic. I wanted to explore how many dogs were adopted during this specific time period and study the changes in their status. However, I faced challenges converting the 'MonthYear' data into an appropriate datetime format. Therefore, I changed my research questions to the following: Q1. What are the top 10 most popular names of shelter animals in Austin, Texas? Q2. What types of animals make up the shelter population in Austin, Texas?
# ## Data Profile
# Key Features: Gender - Intact male, Neutered male, Spayed female, Neutered female, Unknown; Age - Weeks, months, years; Color; Breed
# Input Requirements: The open data portal serves a JSON format, but I used a CSV
# Output Format: CSV
# Limitations: Only covers animal outcomes in Austin, Texas.
# ## Analysis
# Load pandas
import pandas as pd

# Read the CSV file into a pandas DataFrame
df = pd.read_csv(
    "/kaggle/input/austin-animal-center-outcomes/Austin_Animal_Center_Outcomes.csv"
)
# Display the first few rows of the DataFrame
print(df.head())
# find out keys/labels for column
# Retrieve the column keys as a list
column_keys = df.columns.tolist()
# Print the column keys
print(column_keys)
# Convert the 'MonthYear' column to datetime format, handling errors by setting unmatched values to NaN
# df['MonthYear'] = pd.to_datetime(df['MonthYear'], format='%B %Y', errors='coerce')
# Convert the 'MonthYear' column to the desired format
# df['MonthYear'] = df['MonthYear'].dt.strftime('%m-%Y')
# Identify the column containing the names
name_column = "Name"  # Replace with the actual column name in your dataset
# Get the count of each name
name_counts = df[name_column].value_counts()
# Sort the counts in descending order
sorted_names = name_counts.sort_values(ascending=False)
# Print the top 10 most popular names
top_names = sorted_names.head(10)
print(top_names)
# plotly must be imported before px.bar is called below
import plotly.express as px

dog_name_column = "Name"  # Replace with the actual column name in your dataset
top_10_names = df[name_column].value_counts().nlargest(10)
fig = px.bar(top_10_names, x=top_10_names.index, y=top_10_names.values)
fig.update_layout(xaxis_title="Name", yaxis_title="Frequency")
fig.show()
import pandas as pd
import plotly.express as px

df = pd.read_csv(
    "/kaggle/input/austin-animal-center-outcomes/Austin_Animal_Center_Outcomes.csv"
)
animal_column = "Animal Type"  # Replace with the actual column name in your dataset
animal_counts = df[animal_column].value_counts()
animal_proportions = animal_counts / len(df) * 100
fig = px.pie(names=animal_proportions.index, values=animal_proportions.values)
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.show()
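# Since the original research question was dropped over the 'MonthYear'
# conversion, here is a minimal editorial sketch of one way it could be parsed.
# The assumed raw format ("May 2021"-style month-name strings) is a guess, so
# errors="coerce" turns anything that doesn't match into NaT:
parsed = pd.to_datetime(df["MonthYear"], format="%B %Y", errors="coerce")
monthly_counts = parsed.dropna().dt.to_period("M").value_counts().sort_index()
print(monthly_counts.tail())  # outcomes per month, most recent months last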
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/689/129689946.ipynb
null
null
[{"Id": 129689946, "ScriptId": 38535893, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14417680, "CreationDate": "05/15/2023 19:14:26", "VersionNumber": 1.0, "Title": "Mini Project 1_Jeeah Eom", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 90.0, "LinesInsertedFromPrevious": 90.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Overview
# I have always loved dogs and finally adopted my first dog last year during the pandemic. I wanted to explore how many dogs were adopted during this specific time period and study the changes in their status. However, I faced challenges converting the 'MonthYear' data into an appropriate datetime format. Therefore, I changed my research questions to the following: Q1. What are the top 10 most popular names of shelter animals in Austin, Texas? Q2. What types of animals make up the shelter population in Austin, Texas?
# ## Data Profile
# Key Features: Gender - Intact male, Neutered male, Spayed female, Neutered female, Unknown; Age - Weeks, months, years; Color; Breed
# Input Requirements: The open data portal serves a JSON format, but I used a CSV
# Output Format: CSV
# Limitations: Only covers animal outcomes in Austin, Texas.
# ## Analysis
# Load pandas
import pandas as pd

# Read the CSV file into a pandas DataFrame
df = pd.read_csv(
    "/kaggle/input/austin-animal-center-outcomes/Austin_Animal_Center_Outcomes.csv"
)
# Display the first few rows of the DataFrame
print(df.head())
# find out keys/labels for column
# Retrieve the column keys as a list
column_keys = df.columns.tolist()
# Print the column keys
print(column_keys)
# Convert the 'MonthYear' column to datetime format, handling errors by setting unmatched values to NaN
# df['MonthYear'] = pd.to_datetime(df['MonthYear'], format='%B %Y', errors='coerce')
# Convert the 'MonthYear' column to the desired format
# df['MonthYear'] = df['MonthYear'].dt.strftime('%m-%Y')
# Identify the column containing the names
name_column = "Name"  # Replace with the actual column name in your dataset
# Get the count of each name
name_counts = df[name_column].value_counts()
# Sort the counts in descending order
sorted_names = name_counts.sort_values(ascending=False)
# Print the top 10 most popular names
top_names = sorted_names.head(10)
print(top_names)
# plotly must be imported before px.bar is called below
import plotly.express as px

dog_name_column = "Name"  # Replace with the actual column name in your dataset
top_10_names = df[name_column].value_counts().nlargest(10)
fig = px.bar(top_10_names, x=top_10_names.index, y=top_10_names.values)
fig.update_layout(xaxis_title="Name", yaxis_title="Frequency")
fig.show()
import pandas as pd
import plotly.express as px

df = pd.read_csv(
    "/kaggle/input/austin-animal-center-outcomes/Austin_Animal_Center_Outcomes.csv"
)
animal_column = "Animal Type"  # Replace with the actual column name in your dataset
animal_counts = df[animal_column].value_counts()
animal_proportions = animal_counts / len(df) * 100
fig = px.pie(names=animal_proportions.index, values=animal_proportions.values)
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.show()
false
0
978
0
978
978
129689677
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
from sklearn import linear_model
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import scipy.stats as stats
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler, PolynomialFeatures

df = pd.read_csv("/kaggle/input/titanic/train.csv")
tdf = df.set_index("PassengerId")
tdf.info()
tdf
tdf["Age"] = pd.to_numeric(df["Age"], errors="coerce")
# tdf["Age"] = tdf["Age"].fillna(round(df["Age"].median()))
# Note: training Age is left with NaNs here; XGBoost handles missing values
# natively, while the test set below fills them with the median.
tdf["Sex"] = tdf["Sex"].replace("female", 0)
tdf["Sex"] = tdf["Sex"].replace("male", 1)
tdf
# reg = linear_model.LogisticRegression()
# y = tdf["Survived"]
# X = tdf[["Pclass", "Age", "Sex"]]
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# reg.fit(X_train, y_train)
# y_pred = reg.predict(X_test)
# print(classification_report(y_test, y_pred))
# using the XGBoost model
model = XGBClassifier()
y = tdf["Survived"]
X = tdf[["Pclass", "Age", "Sex"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# y = tdf["Survived"]
# X = tdf[["Pclass", "Age", "Sex"]]
# # Get 75% of the dataset as the training set. Put the remaining 25% in temporary variables: x_ and y_.
# x_train, x_, y_train, y_ = train_test_split(X, y, test_size = 0.25, random_state=1)
# # Split the 25% subset above into two halves: one for cross-validation and the other for the test set
# x_cv, X_test, y_cv, y_test = train_test_split(x_, y_, test_size=0.50, random_state=1)
# # Delete temporary variables
# del x_, y_
# model = XGBClassifier()
# model.fit(x_train, y_train)
# y_pred = model.predict(X_test)
model.score(X=X_test, y=y_test)
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
f_df = test_df.set_index("PassengerId")
f_df["Age"] = f_df["Age"].fillna(round((test_df["Age"]).median()))
f_df["Sex"] = f_df["Sex"].replace("female", 0)
f_df["Sex"] = f_df["Sex"].replace("male", 1)
f_df
X = f_df[["Pclass", "Age", "Sex"]]
f_pred = model.predict(X)
print(f_pred)
output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": f_pred})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
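# A single 90/10 split gives a noisy accuracy estimate; a minimal editorial
# sketch of 5-fold cross-validation on the same three features (separate from
# the submission flow above):
from sklearn.model_selection import cross_val_score

X_full = tdf[["Pclass", "Age", "Sex"]]
y_full = tdf["Survived"]
cv_scores = cross_val_score(XGBClassifier(), X_full, y_full, cv=5, scoring="accuracy")
print(f"CV accuracy: {cv_scores.mean():.3f} +/- {cv_scores.std():.3f}")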
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/689/129689677.ipynb
null
null
[{"Id": 129689677, "ScriptId": 36840846, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13203601, "CreationDate": "05/15/2023 19:11:41", "VersionNumber": 10.0, "Title": "titanic", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 85.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 78.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os from sklearn import linear_model import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import scipy.stats as stats from xgboost import XGBClassifier from sklearn.preprocessing import StandardScaler, PolynomialFeatures df = pd.read_csv("/kaggle/input/titanic/train.csv") tdf = df.set_index("PassengerId") tdf.info() tdf tdf["Age"] = pd.to_numeric(df["Age"], errors="coerce") # tdf["Age"] = tdf["Age"].fillna(round(df["Age"].median())) tdf["Sex"] = tdf["Sex"].replace("female", 0) tdf["Sex"] = tdf["Sex"].replace("male", 1) tdf # reg = linear_model.LogisticRegression() # y = tdf["Survived"] # X = tdf[["Pclass", "Age", "Sex"]] # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # reg.fit(X_train, y_train) # y_pred = reg.predict(X_test) # print(classification_report(y_test, y_pred)) # using XGBOOST model model = XGBClassifier() y = tdf["Survived"] X = tdf[["Pclass", "Age", "Sex"]] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) model.fit(X_train, y_train) y_pred = model.predict(X_test) # y = tdf["Survived"] # X = tdf[["Pclass", "Age", "Sex"]] # # Get 60% of the dataset as the training set. Put the remaining 40% in temporary variables: x_ and y_. # x_train, x_, y_train, y_ = train_test_split(X, y, test_size = 0.25, random_state=1) # # Split the 40% subset above into two: one half for cross validation and the other for the test set # x_cv, X_test, y_cv, y_test = train_test_split(x_, y_, test_size=0.50, random_state=1) # # Delete temporary variables # del x_, y_ # model = XGBClassifier() # model.fit(x_train, y_train) # y_pred = model.predict(x_test) model.score(X=X_test, y=y_test) test_df = pd.read_csv("/kaggle/input/titanic/test.csv") f_df = test_df.set_index("PassengerId") f_df["Age"] = f_df["Age"].fillna(round((test_df["Age"]).median())) f_df["Sex"] = f_df["Sex"].replace("female", 0) f_df["Sex"] = f_df["Sex"].replace("male", 1) f_df X = f_df[["Pclass", "Age", "Sex"]] f_pred = model.predict(X) print(f_pred) output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": f_pred}) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!")
false
0
950
0
950
950
129667686
<jupyter_start><jupyter_text>Marijuana Arrests in Toronto: Racial Disparities
```
Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.
```
| Column | Description |
| --- | --- |
| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes. |
| colour | The arrestee's race; a factor with levels: Black; White. |
| year | 1997 through 2002; a numeric vector. |
| age | in years; a numeric vector. |
| sex | a factor with levels: Female; Male. |
| employed | a factor with levels: No; Yes. |
| citizen | a factor with levels: No; Yes. |
| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. – 6 in all) on which the arrestee's name appeared; a numeric vector |

# Source
Personal communication from Michael Friendly, York University.
Kaggle dataset identifier: arrests-for-marijuana-possession
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use("ggplot")
import time
from math import *
from ydata_profiling import ProfileReport
from sklearn.preprocessing import LabelEncoder

df = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
df
df = df.drop(["Unnamed: 0"], axis=1)
df
df.info()
le = LabelEncoder()
for itr, col in enumerate(df.columns):
    if df[col].dtypes == "object":
        df[col] = le.fit_transform(df[col])
df
corr_matrix = df.corr()
corr_matrix
sns.heatmap(corr_matrix, annot=True, fmt=".3f", cmap="crest")
# No strong pairwise correlations appear in the matrix above, implying that no two features/attributes are closely related to each other
# ### Let's look at the year 1997
df_1997 = df[df["year"] == 1997]
df_1997
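# Pearson correlation on label-encoded categoricals is a weak signal for this
# kind of data; a more direct check is a simple group comparison. A minimal
# editorial sketch on the 1997 subset built above. Note the assumption: after
# LabelEncoder's alphabetical mapping, colour Black -> 0 / White -> 1 and
# released No -> 0 / Yes -> 1, which is worth verifying per column:
release_rate_1997 = df_1997.groupby("colour")["released"].mean()
print(release_rate_1997)  # share of arrestees released with a summons, by race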
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667686.ipynb
arrests-for-marijuana-possession
utkarshx27
[{"Id": 129667686, "ScriptId": 38558945, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11661305, "CreationDate": "05/15/2023 15:41:12", "VersionNumber": 1.0, "Title": "jignagda_marijuana", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 44.0, "LinesInsertedFromPrevious": 44.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185977317, "KernelVersionId": 129667686, "SourceDatasetVersionId": 5631796}]
[{"Id": 5631796, "DatasetId": 3238325, "DatasourceVersionId": 5707058, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 10:17:21", "VersionNumber": 1.0, "Title": "Marijuana Arrests in Toronto: Racial Disparities", "Slug": "arrests-for-marijuana-possession", "Subtitle": "Marijuana Arrests in Toronto: Race, Release, and Policing (1997-2002)", "Description": "``` \nData on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.\n```\n| Column | Description |\n| --- | --- |\n| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.\n |\n| colour | The arrestee's race; a factor with levels: Black; White. |\n| year | 1997 through 2002; a numeric vector. |\n| age | in years; a numeric vector. |\n| sex | a factor with levels: Female; Male. |\n| employed | a factor with levels: No; Yes. |\n| citizen | a factor with levels: No; Yes. |\n| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. \u2013 6 in all) on which the arrestee's name appeared; a numeric vector |\n\n# Source\nPersonal communication from Michael Friendly, York University.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3238325, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5631796.0, "CurrentDatasourceVersionId": 5707058.0, "ForumId": 3303517, "Type": 2, "CreationDate": "05/08/2023 10:17:21", "LastActivityDate": "05/08/2023", "TotalViews": 8788, "TotalDownloads": 1614, "TotalVotes": 49, "TotalKernels": 14}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use("ggplot")
import time
from math import *
from ydata_profiling import ProfileReport
from sklearn.preprocessing import LabelEncoder

df = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
df
df = df.drop(["Unnamed: 0"], axis=1)
df
df.info()
le = LabelEncoder()
for itr, col in enumerate(df.columns):
    if df[col].dtypes == "object":
        df[col] = le.fit_transform(df[col])
df
corr_matrix = df.corr()
corr_matrix
sns.heatmap(corr_matrix, annot=True, fmt=".3f", cmap="crest")
# No strong pairwise correlations appear in the matrix above, implying that no two features/attributes are closely related to each other
# ### Let's look at the year 1997
df_1997 = df[df["year"] == 1997]
df_1997
false
1
267
1
591
267
129667081
<jupyter_start><jupyter_text>PostPartum Depression
In our research, we gathered a dataset of 1503 records from a medical hospital using a questionnaire administered through a Google form. This dataset has not yet been published. Our dataset includes 15 attributes, where I select 10 attributes, 9 of which were used for analysis and 1 of which was the target attribute. The target attribute, "Feeling Anxious," was chosen as a predictor of postpartum depression.
Kaggle dataset identifier: postpartum-depression
<jupyter_script># Postpartum depression is a common psychological disorder, especially in countries with a high rate of stressors. Physicians should be aware of its symptoms so they can screen high-risk mothers for further diagnosis. The disorder can have serious consequences, ranging from a lack of breastfeeding up to infanticide or suicide. By raising awareness of this phenomenon we could prevent these unwanted outcomes.
# My goal with this notebook was to find the symptoms most commonly linked to this disorder, in order to create a faster referral tool for diagnosing postpartum depression.
# According to the Cramér's V correlations, a high rate of anxiety, impaired concentration, irritability, insomnia and sleep disorder are strongly linked to this phenomenon. So, during a patient visit, asking about these symptoms could select patients for screening with the Edinburgh Postnatal Depression Scale; in this manner, patients are chosen faster for diagnosis of postpartum depression.
# # import dataset
import pandas as pd

df1 = pd.read_csv("/kaggle/input/postpartum-depression/post natal data.csv")
df1 = pd.DataFrame(df1)
df1
df1.drop(columns="Timestamp", inplace=True)
df2 = df1.sample(frac=1).reset_index(drop=True)
df2 = pd.DataFrame(df2)
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
df3 = df2.apply(le.fit_transform)
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(10, 6))
sns.heatmap(df3.corr(), annot=True, cmap="coolwarm")
plt.title("Heatmap of Pearson correlation")
plt.show()
from scipy.stats import chi2_contingency
import numpy as np


def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2 = chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))


corr_matrix = pd.DataFrame(index=df3.columns, columns=df3.columns)
for i, col1 in enumerate(df3.columns):
    for j, col2 in enumerate(df3.columns):
        if i >= j:
            continue
        corr_matrix.loc[col1, col2] = cramers_v(df3[col1], df3[col2])
        corr_matrix.loc[col2, col1] = corr_matrix.loc[col1, col2]
corr_matrix = corr_matrix.astype(float)
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()
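# To surface the screening questions directly, the symptoms can be ranked by
# their Cramér's V against the target. A minimal editorial sketch reusing the
# `cramers_v` and `df3` defined above; the exact target column name
# ("Feeling anxious") is an assumption about this dataset and should be checked
# against df3.columns:
target_col = "Feeling anxious"  # assumed name; verify with df3.columns
assoc = {
    col: cramers_v(df3[col], df3[target_col])
    for col in df3.columns
    if col != target_col
}
for col, v in sorted(assoc.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{col}: {v:.3f}")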
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667081.ipynb
postpartum-depression
parvezalmuqtadir2348
[{"Id": 129667081, "ScriptId": 38558826, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13127163, "CreationDate": "05/15/2023 15:36:36", "VersionNumber": 1.0, "Title": "symptoms for PDS screening", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 56.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185976395, "KernelVersionId": 129667081, "SourceDatasetVersionId": 4881865}]
[{"Id": 4881865, "DatasetId": 2830731, "DatasourceVersionId": 4949042, "CreatorUserId": 9585487, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "01/21/2023 19:50:10", "VersionNumber": 1.0, "Title": "PostPartum Depression", "Slug": "postpartum-depression", "Subtitle": "Mental Disorder of pregnant women", "Description": "In our research, we gathered a dataset of 1503 records from a medical hospital using a\nquestionnaire administered through a Google form. This dataset has not yet been published.\nOur dataset includes 15 attributes, where I select 10 attributes, 9 of which were used for\nanalysis and 1 of which was the target attribute. The target attribute, \"Feeling Anxious,\"\nwas chosen as a predictor of postpartum depression.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2830731, "CreatorUserId": 9585487, "OwnerUserId": 9585487.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4881865.0, "CurrentDatasourceVersionId": 4949042.0, "ForumId": 2865921, "Type": 2, "CreationDate": "01/21/2023 19:50:10", "LastActivityDate": "01/21/2023", "TotalViews": 10791, "TotalDownloads": 1298, "TotalVotes": 32, "TotalKernels": 9}]
[{"Id": 9585487, "UserName": "parvezalmuqtadir2348", "DisplayName": "Md Parvez Mosaraf", "RegisterDate": "02/05/2022", "PerformanceTier": 1}]
# Postpartum depression is a common psychological disorder, especially in countries with a high rate of stressors. Physicians should be aware of its symptoms so they can screen high-risk mothers for further diagnosis. The disorder can have serious consequences, ranging from a lack of breastfeeding up to infanticide or suicide. By raising awareness of this phenomenon we could prevent these unwanted outcomes.
# My goal with this notebook was to find the symptoms most commonly linked to this disorder, in order to create a faster referral tool for diagnosing postpartum depression.
# According to the Cramér's V correlations, a high rate of anxiety, impaired concentration, irritability, insomnia and sleep disorder are strongly linked to this phenomenon. So, during a patient visit, asking about these symptoms could select patients for screening with the Edinburgh Postnatal Depression Scale; in this manner, patients are chosen faster for diagnosis of postpartum depression.
# # import dataset
import pandas as pd

df1 = pd.read_csv("/kaggle/input/postpartum-depression/post natal data.csv")
df1 = pd.DataFrame(df1)
df1
df1.drop(columns="Timestamp", inplace=True)
df2 = df1.sample(frac=1).reset_index(drop=True)
df2 = pd.DataFrame(df2)
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
df3 = df2.apply(le.fit_transform)
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(10, 6))
sns.heatmap(df3.corr(), annot=True, cmap="coolwarm")
plt.title("Heatmap of Pearson correlation")
plt.show()
from scipy.stats import chi2_contingency
import numpy as np


def cramers_v(x, y):
    confusion_matrix = pd.crosstab(x, y)
    chi2 = chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    rcorr = r - ((r - 1) ** 2) / (n - 1)
    kcorr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))


corr_matrix = pd.DataFrame(index=df3.columns, columns=df3.columns)
for i, col1 in enumerate(df3.columns):
    for j, col2 in enumerate(df3.columns):
        if i >= j:
            continue
        corr_matrix.loc[col1, col2] = cramers_v(df3[col1], df3[col2])
        corr_matrix.loc[col2, col1] = corr_matrix.loc[col1, col2]
corr_matrix = corr_matrix.astype(float)
import seaborn as sns
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm")
plt.show()
false
1
798
0
924
798
129667499
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from pathlib import Path base = Path("/kaggle/input/cat-in-the-dat/") train = pd.read_csv(base / "train.csv") test = pd.read_csv(base / "test.csv") submission = pd.read_csv(base / "sample_submission.csv") train.head(3).T # # Baseline model # 7.3.1 Feature engineering # - Combine the data # - One-hot encoding # - Split the data # 7.3.2 Model training # 7.3.3 Model validation # 7.3.4 Prediction and submission # ### Feature engineering all_data = pd.concat([train, test], ignore_index=True) all_data.drop(["target"], axis=1, inplace=True) all_data from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder() all_data_encoded = encoder.fit_transform(all_data) all_data_encoded len_train = len(train) X = all_data_encoded[:len_train] X_test = all_data_encoded[len_train:] y = train["target"] from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.1, stratify=y, random_state=10 ) # ### Model training from sklearn.linear_model import LogisticRegression lr = LogisticRegression(max_iter=1000, random_state=42) lr.fit(X_train, y_train) # ### Model validation lr.predict_proba(X_val) lr.predict(X_val) from sklearn.metrics import roc_auc_score # ROC AUC should be scored on predicted probabilities rather than hard class labels y_valid_preds = lr.predict_proba(X_val)[:, 1] roc_auc = roc_auc_score(y_val, y_valid_preds) print(f"Validation ROC AUC: {roc_auc:.4f}") # ### Prediction and submission y_preds = lr.predict_proba(X_test)[:, 1] submission["target"] = y_preds submission.to_csv("basemodel_submission.csv", index=False) # # Performance improvements # 7.4.1 Feature engineering 1 : encoding tailored to each feature # - Combine the data # - Binary feature encoding # - Ordinal feature encoding # - Nominal feature encoding # - Sparse-matrix conversion # - Date feature encoding # - Trigonometric transforms # 7.4.2 Feature engineering 2 : feature scaling # - Ordinal feature scaling # - Combine the encoded and scaled features # - Split the data # 7.4.3 Hyperparameter optimization # 7.4.4 Model validation # 7.5.1 Apply the best-performing model to the full dataset # - Do not split off a validation set # - Train on all of the training data # 7.4.5 Prediction and submission def resume_table(df): print(f"Dataset shape: {df.shape}") summary = pd.DataFrame(df.dtypes, columns=["data_type"]) summary = summary.reset_index() summary = summary.rename(columns={"index": "feature"}) summary["n_missing_values"] = df.isnull().sum().values summary["n_unique_values"] = df.nunique().values for i in range(3): summary[f"{i + 1}th_data_point"] = df.iloc[i].values return summary resume_table(train) # # Feature Engineering # # 1. Drop unnecessary features # - id : merely identifies each row # - bin_0 : judged uncorrelated with the target by a chi-squared test # all_data.drop(["id", "bin_0"], axis=1, inplace=True) all_data.head(1).T # # 2. Feature Encoding # - Encode each feature according to its characteristics # ## 2-1. Binary feature encoding all_data["bin_3"] = all_data["bin_3"].map({"F": 0, "T": 1}) all_data["bin_4"] = all_data["bin_4"].map({"N": 0, "Y": 1})
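# A small optional guard (a sketch, assuming the all_data frame from the cell above): pandas .map() silently turns any category missing from the mapping dict into NaN, so it is worth confirming that the binary maps covered every value.
assert all_data[["bin_3", "bin_4"]].isna().sum().sum() == 0, "unmapped binary category"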
# ## 2-2. Ordinal feature encoding ord1_dict = {"Novice": 0, "Contributor": 1, "Expert": 2, "Master": 3, "Grandmaster": 4} ord2_dict = { "Freezing": 0, "Cold": 1, "Warm": 2, "Hot": 3, "Boiling Hot": 4, "Lava Hot": 5, } all_data["ord_1"] = all_data["ord_1"].apply(lambda x: ord1_dict[x]) all_data["ord_2"] = all_data["ord_2"].apply(lambda x: ord2_dict[x]) resume_table(all_data) from sklearn.preprocessing import OrdinalEncoder ord_345 = ["ord_3", "ord_4", "ord_5"] ord_encoder = OrdinalEncoder() all_data[ord_345] = ord_encoder.fit_transform(all_data[ord_345]) for feat, cats in zip(ord_345, ord_encoder.categories_): print(feat) print(cats) all_data.head().T # ## 2-3. Nominal feature encoding nom_features = ["nom_" + str(i) for i in range(10)] from sklearn.preprocessing import OneHotEncoder oh_encoder = OneHotEncoder() # The assignment below is not possible! One-hot encoding returns a sparse matrix whose column count differs from the original # all_data[nom_features] = oh_encoder.fit_transform(all_data[nom_features]) encoded_nom_matrix = oh_encoder.fit_transform(all_data[nom_features]) encoded_nom_matrix all_data.drop(nom_features, axis=1, inplace=True) # ## 2-4. Date feature encoding # - reference: # - https://scikit-learn.org/stable/auto_examples/applications/plot_cyclical_feature_engineering.html#trigonometric-features # ### 1) day from sklearn.preprocessing import FunctionTransformer def sin_transformer(period): return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi)) def cos_transformer(period): return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi)) # ### period = 7 a = np.array([1, 2, 3, 4, 5, 6, 7]) sin_encoded_a = sin_transformer(7).fit_transform(a) np.round(sin_encoded_a, 3) cos_encoded_a = cos_transformer(7).fit_transform(a) np.round(cos_encoded_a, 3) all_data["day_sin"] = sin_transformer(7).fit_transform(all_data["day"]) # Conclusion # - For a cyclic feature, the period should be the "number of intervals" # - From 1 to 7 there are 6 intervals (6 steps), so period = 6 would be the appropriate choice all_data.head(3).T # ### 2) month encoded_day_matrix = oh_encoder.fit_transform(all_data[["day"]]) encoded_month_matrix = oh_encoder.fit_transform(all_data[["month"]]) all_data.drop(["day", "month"], axis=1, inplace=True) encoded_day_matrix, encoded_month_matrix # # 3. Feature Scaling # ### 3-1. Ordinal feature scaling from sklearn.preprocessing import MinMaxScaler ord_feats = ["ord_" + str(i) for i in range(6)] all_data[ord_feats] = MinMaxScaler().fit_transform(all_data[ord_feats]) resume_table(all_data) # ### 3-2. Combine all the features from scipy import sparse data_sprs_day_sin = sparse.hstack( [sparse.csc_matrix(all_data), encoded_nom_matrix, encoded_month_matrix], format="csr", ) all_data.drop(["day_sin"], axis=1, inplace=True) data_sprs_day_oh = sparse.hstack( [ sparse.csc_matrix(all_data), encoded_nom_matrix, encoded_day_matrix, encoded_month_matrix, ], format="csr", ) data_sprs_day_sin data_sprs_day_oh # ### 3-3. Split the data n_train = len(train) X = data_sprs_day_sin[:n_train] X_test = data_sprs_day_sin[n_train:] y = train["target"] from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split( X, y, stratify=y, test_size=0.1, random_state=10 )
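# A quick shape sanity check before modelling (a sketch; it assumes the sparse matrices and splits built above): both feature sets must cover every train and test row, differing only in how day is represented.
assert data_sprs_day_sin.shape[0] == data_sprs_day_oh.shape[0] == len(train) + len(test)
print("sin variant    :", data_sprs_day_sin.shape)  # day kept as the single day_sin column
print("one-hot variant:", data_sprs_day_oh.shape)  # day expanded into one column per value
assert X.shape[0] == len(train) and X_test.shape[0] == len(test)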
# ## 4. Optimizing Hyper-Parameters from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression lr = LogisticRegression() params = { "C": [0.1, 0.125, 0.2], "max_iter": [800, 900, 1000], "solver": ["liblinear"], "random_state": [42], } gs_lr = GridSearchCV(estimator=lr, param_grid=params, scoring="roc_auc", cv=5) gs_lr.fit(X_train, y_train) print(f"Best hyperparameters: {gs_lr.best_params_}") from sklearn.metrics import roc_auc_score model = gs_lr.best_estimator_ # score on probabilities, as ROC AUC expects y_val_preds = model.predict_proba(X_val)[:, 1] roc_auc = roc_auc_score(y_val, y_val_preds) print(f"Validation ROC AUC: {roc_auc:.4f}") y_preds = model.predict_proba(X_test)[:, 1] submission["target"] = y_preds submission.to_csv("optimizing_lr_model_with_tri.csv", index=False) # ## Comparison X = data_sprs_day_oh[:n_train] X_test = data_sprs_day_oh[n_train:] y = train["target"] from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split( X, y, stratify=y, test_size=0.1, random_state=10 ) # Best hyperparameters: {'C': 0.125, 'max_iter': 800, 'random_state': 42, 'solver': 'liblinear'} lr = LogisticRegression(C=0.125, max_iter=800, solver="liblinear", random_state=42) lr.fit(X_train, y_train) y_val_preds = lr.predict_proba(X_val)[:, 1] roc_auc = roc_auc_score(y_val, y_val_preds) print(f"Validation ROC AUC: {roc_auc:.4f}") y_preds = lr.predict_proba(X_test)[:, 1] submission["target"] = y_preds submission.to_csv("optimizing_lr_model_with_all_onehot.csv", index=False) # # 4. Final performance improvements # ## 1. Applying Optuna import optuna from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score # Define an objective function to be maximized def objective(trial): # Suggest values for hyperparameters using a trial object C = trial.suggest_float("C", 10e-10, 10) # Create the model model = LogisticRegression( C=C, max_iter=800, solver="liblinear", random_state=42, n_jobs=-1 ) # Evaluate the model on the validation set model.fit(X_train, y_train) y_val_preds = model.predict_proba(X_val)[:, 1] roc_auc = roc_auc_score(y_val, y_val_preds) return roc_auc # Create a study study = optuna.create_study(direction="maximize") study.optimize(objective, n_trials=100) best_params = study.best_params best_score = study.best_value print("Best hyperparameters: {}".format(best_params)) print("Best score: {}".format(best_score)) model = LogisticRegression( C=0.6611790103090663, max_iter=800, solver="liblinear", random_state=42 ) model.fit(X_train, y_train) predictions = model.predict_proba(X_test)[:, 1] submission["target"] = predictions submission.to_csv("lr_with_optuna_all_onehot.csv", index=False) submission.head() # ## Result # - The first-place solution reportedly tuned the C parameter with Optuna, so I tried the same, but it actually hurt performance... # - https://www.kaggle.com/competitions/cat-in-the-dat/discussion/121356 # - Rather than fixing the other params in advance and optimizing only C, is there some other approach? # - Something I would like to discuss in our study group! # ## 2. Training on the whole dataset without a validation set # Best hyperparameters: {'C': 0.125, 'max_iter': 800, 'random_state': 42, 'solver': 'liblinear'} lr = LogisticRegression(C=0.125, max_iter=800, solver="liblinear", random_state=42) lr.fit(X, y) predictions = lr.predict_proba(X_test)[:, 1] submission["target"] = predictions submission.to_csv("lr_with_no_val_all_onehot.csv", index=False) submission.head()
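# On the open question above (tuning more than just C): Optuna can search several parameters jointly in a single objective. The sketch below assumes the X_train/X_val/y_train/y_val built earlier in this notebook; the search ranges are illustrative assumptions, not tuned values.
import optuna
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score


def objective_joint(trial):
    # sample several hyperparameters together instead of fixing all but C
    params = {
        "C": trial.suggest_float("C", 1e-3, 10.0, log=True),
        "max_iter": trial.suggest_int("max_iter", 400, 1200, step=200),
        "solver": trial.suggest_categorical("solver", ["liblinear", "lbfgs"]),
    }
    model = LogisticRegression(random_state=42, **params)
    model.fit(X_train, y_train)
    # score probabilities, as ROC AUC expects
    return roc_auc_score(y_val, model.predict_proba(X_val)[:, 1])


study_joint = optuna.create_study(direction="maximize")
study_joint.optimize(objective_joint, n_trials=50)
print(study_joint.best_params, study_joint.best_value)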
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667499.ipynb
null
null
[{"Id": 129667499, "ScriptId": 38509195, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6510486, "CreationDate": "05/15/2023 15:39:37", "VersionNumber": 5.0, "Title": "cat_in_the_dat_Modeling_ridi", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 450.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 363.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,017
0
4,017
4,017
129667807
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd, numpy as np, seaborn as sns, matplotlib.pyplot as plt train_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/train.csv", index_col="index" ) test_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/test.csv", index_col="index" ) achievements_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/achievements_stats.csv", index_col="Unnamed: 0", ) games_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/games_details.csv", index_col="Unnamed: 0", ) friends_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/users_friends.csv", index_col="Unnamed: 0", ) users_df = pd.read_csv( "/kaggle/input/cmc-ml-steam-activity-prediction/users.csv", index_col="Unnamed: 0" ) TARGET = "playtime_forever" main_df = pd.concat([train_df.reset_index(), test_df.reset_index()], axis=0) main_df["is_train"] = ~main_df.playtime_forever.isna() # log1p-transform the long-tailed playtime target main_df["playtime_forever"] = main_df.playtime_forever.apply(lambda x: np.log1p(x)) main_df users_df["timecreated"] = pd.to_datetime(users_df.timecreated) games_df["release_date"] = pd.to_datetime(games_df.release_date) # treat a rating of 0 as missing; use .loc instead of chained indexing, which may silently write to a copy games_df.loc[games_df.rating == 0, "rating"] = pd.NA main_df = main_df.merge( games_df[["game_id", "release_date", "rating"]], on="game_id", how="left" ).merge( users_df[ [ "user_id", "communityvisibilitystate", "profilestate", "personastate", "timecreated", ] ], on="user_id", how="left", ) # days between account creation and game release (.dt.days replaces the deprecated astype("timedelta64[D]")) main_df["dates_delta"] = (main_df.timecreated - main_df.release_date).dt.days # main_df.drop(columns=['release_date', 'timecreated'], inplace=True) main_df from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_log_error # two-stage split that yields train/val/test in the requested overall proportions def split_data(X, y, val=0.2, test=0.3, random_state=42): X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=(1 - test), random_state=random_state ) val = val / (1 - test) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, train_size=(1 - val), random_state=random_state ) out = { "train": {"X": X_train, "y": y_train}, "val": {"X": X_val, "y": y_val}, "test": {"X": X_test, "y": y_test}, } return out
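# A quick check of the split proportions produced by split_data above (a small sketch on synthetic data): with val=0.2 and test=0.3 the helper should yield roughly a 50/20/30 split.
import numpy as np

Xd = np.arange(1000).reshape(-1, 1)
yd = np.arange(1000)
parts = split_data(Xd, yd)
print({name: len(part["y"]) for name, part in parts.items()})  # {'train': 500, 'val': 200, 'test': 300}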
def training_pipeline(df, drop_columns, target=TARGET, catboost_params=None): df = df[df.is_train] df = df.drop(columns=drop_columns) X, y = df.drop(columns=target), df[target] data = split_data(X, y) model = CatBoostRegressor(**catboost_params) model.fit( data["train"]["X"], data["train"]["y"], eval_set=(data["val"]["X"], data["val"]["y"]), verbose=False, ) # note: the target was already log1p-transformed above, so these RMSLE-style scores are computed in log space print( "Train:", mean_squared_log_error( data["train"]["y"], model.predict(data["train"]["X"]), squared=False ), ) print( "Val:", mean_squared_log_error( data["val"]["y"], model.predict(data["val"]["X"]), squared=False ), ) print( "Test:", mean_squared_log_error( data["test"]["y"], model.predict(data["test"]["X"]), squared=False ), ) return model drop_columns = [ "index", "user_id", "game_id", "game_name", "is_train", "release_date", "timecreated", ] catboost_params = dict( iterations=1000, loss_function="RMSE", random_seed=42, task_type="CPU" ) model = training_pipeline(main_df, drop_columns, catboost_params=catboost_params) import datetime def now(): return datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S") # predict on the held-out test rows, undo the log1p transform, and write a timestamped submission def make_pred(model, main_df, drop_columns, target=TARGET): preds = model.predict( main_df[~main_df.is_train].drop(columns=drop_columns + [TARGET]) ) preds = pd.DataFrame({"playtime_forever": np.expm1(preds)}).reset_index().round(5) preds.to_csv(f"/kaggle/working/submission_{now()}.csv", index=False) make_pred(model, main_df, drop_columns)
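# Optional follow-up (a sketch, assuming the model, main_df, drop_columns and TARGET defined above): list which engineered columns CatBoost relied on most.
import pandas as pd

feat_cols = main_df[main_df.is_train].drop(columns=drop_columns + [TARGET]).columns
importances = pd.Series(model.get_feature_importance(), index=feat_cols)
print(importances.sort_values(ascending=False))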
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667807.ipynb
null
null
[{"Id": 129667807, "ScriptId": 38559660, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10432401, "CreationDate": "05/15/2023 15:42:11", "VersionNumber": 1.0, "Title": "notebookfe582184a7", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 93.0, "LinesInsertedFromPrevious": 93.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,560
0
1,560
1,560
129667665
<jupyter_start><jupyter_text>Company Bankruptcy Prediction ### Similar Datasets - The Boston House-Price Data: [LINK](https://www.kaggle.com/fedesoriano/the-boston-houseprice-data) - Gender Pay Gap Dataset: [LINK](https://www.kaggle.com/fedesoriano/gender-pay-gap-dataset) - Spanish Wine Quality Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/spanish-wine-quality-dataset) ### Context The data were collected from the Taiwan Economic Journal for the years 1999 to 2009. Company bankruptcy was defined based on the business regulations of the Taiwan Stock Exchange. ### Attribute Information **Version 2:** Updated column names and description to make the data easier to understand (Y = Output feature, X = Input features) Y - Bankrupt?: Class label X1 - ROA(C) before interest and depreciation before interest: Return On Total Assets(C) X2 - ROA(A) before interest and % after tax: Return On Total Assets(A) X3 - ROA(B) before interest and depreciation after tax: Return On Total Assets(B) X4 - Operating Gross Margin: Gross Profit/Net Sales X5 - Realized Sales Gross Margin: Realized Gross Profit/Net Sales X6 - Operating Profit Rate: Operating Income/Net Sales X7 - Pre-tax net Interest Rate: Pre-Tax Income/Net Sales X8 - After-tax net Interest Rate: Net Income/Net Sales X9 - Non-industry income and expenditure/revenue: Net Non-operating Income Ratio X10 - Continuous interest rate (after tax): Net Income-Exclude Disposal Gain or Loss/Net Sales X11 - Operating Expense Rate: Operating Expenses/Net Sales X12 - Research and development expense rate: (Research and Development Expenses)/Net Sales X13 - Cash flow rate: Cash Flow from Operating/Current Liabilities X14 - Interest-bearing debt interest rate: Interest-bearing Debt/Equity X15 - Tax rate (A): Effective Tax Rate X16 - Net Value Per Share (B): Book Value Per Share(B) X17 - Net Value Per Share (A): Book Value Per Share(A) X18 - Net Value Per Share (C): Book Value Per Share(C) X19 - Persistent EPS in the Last Four Seasons: EPS-Net Income X20 - Cash Flow Per Share X21 - Revenue Per Share (Yuan ¥): Sales Per Share X22 - Operating Profit Per Share (Yuan ¥): Operating Income Per Share X23 - Per Share Net profit before tax (Yuan ¥): Pretax Income Per Share X24 - Realized Sales Gross Profit Growth Rate X25 - Operating Profit Growth Rate: Operating Income Growth X26 - After-tax Net Profit Growth Rate: Net Income Growth X27 - Regular Net Profit Growth Rate: Continuing Operating Income after Tax Growth X28 - Continuous Net Profit Growth Rate: Net Income-Excluding Disposal Gain or Loss Growth X29 - Total Asset Growth Rate: Total Asset Growth X30 - Net Value Growth Rate: Total Equity Growth X31 - Total Asset Return Growth Rate Ratio: Return on Total Asset Growth X32 - Cash Reinvestment %: Cash Reinvestment Ratio X33 - Current Ratio X34 - Quick Ratio: Acid Test X35 - Interest Expense Ratio: Interest Expenses/Total Revenue X36 - Total debt/Total net worth: Total Liability/Equity Ratio X37 - Debt ratio %: Liability/Total Assets X38 - Net worth/Assets: Equity/Total Assets X39 - Long-term fund suitability ratio (A): (Long-term Liability+Equity)/Fixed Assets X40 - Borrowing dependency: Cost of Interest-bearing Debt X41 - Contingent liabilities/Net worth: Contingent Liability/Equity X42 - Operating profit/Paid-in capital: Operating Income/Capital X43 - Net profit before tax/Paid-in capital: Pretax Income/Capital X44 - Inventory and accounts receivable/Net value: (Inventory+Accounts Receivables)/Equity X45 - Total Asset Turnover X46 - Accounts Receivable 
Turnover X47 - Average Collection Days: Days Receivable Outstanding X48 - Inventory Turnover Rate (times) X49 - Fixed Assets Turnover Frequency X50 - Net Worth Turnover Rate (times): Equity Turnover X51 - Revenue per person: Sales Per Employee X52 - Operating profit per person: Operation Income Per Employee X53 - Allocation rate per person: Fixed Assets Per Employee X54 - Working Capital to Total Assets X55 - Quick Assets/Total Assets X56 - Current Assets/Total Assets X57 - Cash/Total Assets X58 - Quick Assets/Current Liability X59 - Cash/Current Liability X60 - Current Liability to Assets X61 - Operating Funds to Liability X62 - Inventory/Working Capital X63 - Inventory/Current Liability X64 - Current Liabilities/Liability X65 - Working Capital/Equity X66 - Current Liabilities/Equity X67 - Long-term Liability to Current Assets X68 - Retained Earnings to Total Assets X69 - Total income/Total expense X70 - Total expense/Assets X71 - Current Asset Turnover Rate: Current Assets to Sales X72 - Quick Asset Turnover Rate: Quick Assets to Sales X73 - Working capitcal Turnover Rate: Working Capital to Sales X74 - Cash Turnover Rate: Cash to Sales X75 - Cash Flow to Sales X76 - Fixed Assets to Assets X77 - Current Liability to Liability X78 - Current Liability to Equity X79 - Equity to Long-term Liability X80 - Cash Flow to Total Assets X81 - Cash Flow to Liability X82 - CFO to Assets X83 - Cash Flow to Equity X84 - Current Liability to Current Assets X85 - Liability-Assets Flag: 1 if Total Liability exceeds Total Assets, 0 otherwise X86 - Net Income to Total Assets X87 - Total assets to GNP price X88 - No-credit Interval X89 - Gross Profit to Sales X90 - Net Income to Stockholder's Equity X91 - Liability to Equity X92 - Degree of Financial Leverage (DFL) X93 - Interest Coverage Ratio (Interest expense to EBIT) X94 - Net Income Flag: 1 if Net Income is Negative for the last two years, 0 otherwise X95 - Equity to Liability ### Source Deron Liang and Chih-Fong Tsai, deronliang '@' gmail.com; cftsai '@' mgt.ncu.edu.tw, National Central University, Taiwan The data was obtained from UCI Machine Learning Repository: [https://archive.ics.uci.edu/ml/datasets/Taiwanese+Bankruptcy+Prediction](https://archive.ics.uci.edu/ml/datasets/Taiwanese+Bankruptcy+Prediction) ### Relevant Papers Liang, D., Lu, C.-C., Tsai, C.-F., and Shih, G.-A. (2016) Financial Ratios and Corporate Governance Indicators in Bankruptcy Prediction: A Comprehensive Study. European Journal of Operational Research, vol. 252, no. 2, pp. 561-572. [https://www.sciencedirect.com/science/article/pii/S0377221716000412](https://www.sciencedirect.com/science/article/pii/S0377221716000412) Kaggle dataset identifier: company-bankruptcy-prediction <jupyter_code>import pandas as pd df = pd.read_csv('company-bankruptcy-prediction/data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 6819 entries, 0 to 6818 Data columns (total 96 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Bankrupt? 
6819 non-null int64 1 ROA(C) before interest and depreciation before interest 6819 non-null float64 2 ROA(A) before interest and % after tax 6819 non-null float64 3 ROA(B) before interest and depreciation after tax 6819 non-null float64 4 Operating Gross Margin 6819 non-null float64 5 Realized Sales Gross Margin 6819 non-null float64 6 Operating Profit Rate 6819 non-null float64 7 Pre-tax net Interest Rate 6819 non-null float64 8 After-tax net Interest Rate 6819 non-null float64 9 Non-industry income and expenditure/revenue 6819 non-null float64 10 Continuous interest rate (after tax) 6819 non-null float64 11 Operating Expense Rate 6819 non-null float64 12 Research and development expense rate 6819 non-null float64 13 Cash flow rate 6819 non-null float64 14 Interest-bearing debt interest rate 6819 non-null float64 15 Tax rate (A) 6819 non-null float64 16 Net Value Per Share (B) 6819 non-null float64 17 Net Value Per Share (A) 6819 non-null float64 18 Net Value Per Share (C) 6819 non-null float64 19 Persistent EPS in the Last Four Seasons 6819 non-null float64 20 Cash Flow Per Share 6819 non-null float64 21 Revenue Per Share (Yuan ¥) 6819 non-null float64 22 Operating Profit Per Share (Yuan ¥) 6819 non-null float64 23 Per Share Net profit before tax (Yuan ¥) 6819 non-null float64 24 Realized Sales Gross Profit Growth Rate 6819 non-null float64 25 Operating Profit Growth Rate 6819 non-null float64 26 After-tax Net Profit Growth Rate 6819 non-null float64 27 Regular Net Profit Growth Rate 6819 non-null float64 28 Continuous Net Profit Growth Rate 6819 non-null float64 29 Total Asset Growth Rate 6819 non-null float64 30 Net Value Growth Rate 6819 non-null float64 31 Total Asset Return Growth Rate Ratio 6819 non-null float64 32 Cash Reinvestment % 6819 non-null float64 33 Current Ratio 6819 non-null float64 34 Quick Ratio 6819 non-null float64 35 Interest Expense Ratio 6819 non-null float64 36 Total debt/Total net worth 6819 non-null float64 37 Debt ratio % 6819 non-null float64 38 Net worth/Assets 6819 non-null float64 39 Long-term fund suitability ratio (A) 6819 non-null float64 40 Borrowing dependency 6819 non-null float64 41 Contingent liabilities/Net worth 6819 non-null float64 42 Operating profit/Paid-in capital 6819 non-null float64 43 Net profit before tax/Paid-in capital 6819 non-null float64 44 Inventory and accounts receivable/Net value 6819 non-null float64 45 Total Asset Turnover 6819 non-null float64 46 Accounts Receivable Turnover 6819 non-null float64 47 Average Collection Days 6819 non-null float64 48 Inventory Turnover Rate (times) 6819 non-null float64 49 Fixed Assets Turnover Frequency 6819 non-null float64 50 Net Worth Turnover Rate (times) 6819 non-null float64 51 Revenue per person 6819 non-null float64 52 Operating profit per person 6819 non-null float64 53 Allocation rate per person 6819 non-null float64 54 Working Capital to Total Assets 6819 non-null float64 55 Quick Assets/Total Assets 6819 non-null float64 56 Current Assets/Total Assets 6819 non-null float64 57 Cash/Total Assets 6819 non-null float64 58 Quick Assets/Current Liability 6819 non-null float64 59 Cash/Current Liability 6819 non-null float64 60 Current Liability to Assets 6819 non-null float64 61 Operating Funds to Liability 6819 non-null float64 62 Inventory/Working Capital 6819 non-null float64 63 Inventory/Current Liability 6819 non-null float64 64 Current Liabilities/Liability 6819 non-null float64 65 Working Capital/Equity 6819 non-null float64 66 Current Liabilities/Equity 6819 non-null float64 67 
Long-term Liability to Current Assets 6819 non-null float64 68 Retained Earnings to Total Assets 6819 non-null float64 69 Total income/Total expense 6819 non-null float64 70 Total expense/Assets 6819 non-null float64 71 Current Asset Turnover Rate 6819 non-null float64 72 Quick Asset Turnover Rate 6819 non-null float64 73 Working capitcal Turnover Rate 6819 non-null float64 74 Cash Turnover Rate 6819 non-null float64 75 Cash Flow to Sales 6819 non-null float64 76 Fixed Assets to Assets 6819 non-null float64 77 Current Liability to Liability 6819 non-null float64 78 Current Liability to Equity 6819 non-null float64 79 Equity to Long-term Liability 6819 non-null float64 80 Cash Flow to Total Assets 6819 non-null float64 81 Cash Flow to Liability 6819 non-null float64 82 CFO to Assets 6819 non-null float64 83 Cash Flow to Equity 6819 non-null float64 84 Current Liability to Current Assets 6819 non-null float64 85 Liability-Assets Flag 6819 non-null int64 86 Net Income to Total Assets 6819 non-null float64 87 Total assets to GNP price 6819 non-null float64 88 No-credit Interval 6819 non-null float64 89 Gross Profit to Sales 6819 non-null float64 90 Net Income to Stockholder's Equity 6819 non-null float64 91 Liability to Equity 6819 non-null float64 92 Degree of Financial Leverage (DFL) 6819 non-null float64 93 Interest Coverage Ratio (Interest expense to EBIT) 6819 non-null float64 94 Net Income Flag 6819 non-null int64 95 Equity to Liability 6819 non-null float64 dtypes: float64(93), int64(3) memory usage: 5.0 MB <jupyter_text>Examples: { "Bankrupt?": 1.0, " ROA(C) before interest and depreciation before interest": 0.3705942573, " ROA(A) before interest and % after tax": 0.4243894461, " ROA(B) before interest and depreciation after tax": 0.4057497725, " Operating Gross Margin": 0.6014572133, " Realized Sales Gross Margin": 0.6014572133, " Operating Profit Rate": 0.9989692032, " Pre-tax net Interest Rate": 0.7968871459, " After-tax net Interest Rate": 0.8088093609, " Non-industry income and expenditure/revenue": 0.3026464339, " Continuous interest rate (after tax)": 0.7809848502000001, " Operating Expense Rate": 0.0001256969, " Research and development expense rate": 0.0, " Cash flow rate": 0.4581431435, " Interest-bearing debt interest rate": 0.0007250725000000001, " Tax rate (A)": 0.0, " Net Value Per Share (B)": 0.1479499389, " Net Value Per Share (A)": 0.1479499389, " Net Value Per Share (C)": 0.1479499389, " Persistent EPS in the Last Four Seasons": 0.16914058810000002, "...": "and 76 more columns" } { "Bankrupt?": 1.0, " ROA(C) before interest and depreciation before interest": 0.4642909375, " ROA(A) before interest and % after tax": 0.53821413, " ROA(B) before interest and depreciation after tax": 0.5167300177, " Operating Gross Margin": 0.6102350855, " Realized Sales Gross Margin": 0.6102350855, " Operating Profit Rate": 0.9989459782000001, " Pre-tax net Interest Rate": 0.7973801913, " After-tax net Interest Rate": 0.8093007257, " Non-industry income and expenditure/revenue": 0.3035564303, " Continuous interest rate (after tax)": 0.7815059743, " Operating Expense Rate": 0.0002897851, " Research and development expense rate": 0.0, " Cash flow rate": 0.4618672572, " Interest-bearing debt interest rate": 0.0006470647000000001, " Tax rate (A)": 0.0, " Net Value Per Share (B)": 0.18225106400000002, " Net Value Per Share (A)": 0.18225106400000002, " Net Value Per Share (C)": 0.18225106400000002, " Persistent EPS in the Last Four Seasons": 0.20894393500000003, "...": "and 76 more columns" } { 
"Bankrupt?": 1.0, " ROA(C) before interest and depreciation before interest": 0.4260712719, " ROA(A) before interest and % after tax": 0.4990187527, " ROA(B) before interest and depreciation after tax": 0.47229509070000003, " Operating Gross Margin": 0.6014500065, " Realized Sales Gross Margin": 0.601363525, " Operating Profit Rate": 0.9988573535, " Pre-tax net Interest Rate": 0.7964033693, " After-tax net Interest Rate": 0.8083875215, " Non-industry income and expenditure/revenue": 0.3020351773, " Continuous interest rate (after tax)": 0.7802839362, " Operating Expense Rate": 0.0002361297, " Research and development expense rate": 25500000.0, " Cash flow rate": 0.4585205875, " Interest-bearing debt interest rate": 0.0007900790000000001, " Tax rate (A)": 0.0, " Net Value Per Share (B)": 0.1779107497, " Net Value Per Share (A)": 0.1779107497, " Net Value Per Share (C)": 0.193712865, " Persistent EPS in the Last Four Seasons": 0.1805805049, "...": "and 76 more columns" } { "Bankrupt?": 1.0, " ROA(C) before interest and depreciation before interest": 0.39984400140000004, " ROA(A) before interest and % after tax": 0.4512647187, " ROA(B) before interest and depreciation after tax": 0.4577332834, " Operating Gross Margin": 0.5835411292, " Realized Sales Gross Margin": 0.5835411292, " Operating Profit Rate": 0.9986997471000001, " Pre-tax net Interest Rate": 0.7969669683, " After-tax net Interest Rate": 0.8089655977, " Non-industry income and expenditure/revenue": 0.30334953600000003, " Continuous interest rate (after tax)": 0.7812409912, " Operating Expense Rate": 0.00010788880000000001, " Research and development expense rate": 0.0, " Cash flow rate": 0.4657054427, " Interest-bearing debt interest rate": 0.00044904490000000004, " Tax rate (A)": 0.0, " Net Value Per Share (B)": 0.1541865071, " Net Value Per Share (A)": 0.1541865071, " Net Value Per Share (C)": 0.1541865071, " Persistent EPS in the Last Four Seasons": 0.1937222275, "...": "and 76 more columns" } <jupyter_script># # Company Bankruptcy # * Company bankruptcy occurs when a company cannot pay its debts and obligations to creditors, resulting in the company's assets being liquidated to repay those debts. # * This can lead to the company ceasing operations and potentially going out of business. # ## Why it is important to study bankruptcy # * Studying bankruptcy is important because it is a significant economic event that affects individuals, businesses, and society as a whole. # * Understanding bankruptcy can help make informed financial decisions and promote economic stability. # ## About data source - Taiwan Economic Journal # The Taiwanese Economic Journal is a prominent economics-focused media outlet based in Taiwan. 
# They cover a range of topics related to the # * Taiwanese and global economies # * including finance, business # * trade, and investment # ## Attribute information # * Y - Bankrupt?: Class label # * X1 - ROA(C) before interest and depreciation before interest: Return On Total Assets(C) # * X2 - ROA(A) before interest and % after tax: Return On Total Assets(A) # * X3 - ROA(B) before interest and depreciation after tax: Return On Total Assets(B) # * X4 - Operating Gross Margin: Gross Profit/Net Sales # * X5 - Realized Sales Gross Margin: Realized Gross Profit/Net Sales # * X6 - Operating Profit Rate: Operating Income/Net Sales # * X7 - Pre-tax net Interest Rate: Pre-Tax Income/Net Sales # * X8 - After-tax net Interest Rate: Net Income/Net Sales # * X9 - Non-industry income and expenditure/revenue: Net Non-operating Income Ratio # * X10 - Continuous interest rate (after tax): Net Income-Exclude Disposal Gain or Loss/Net Sales # * X11 - Operating Expense Rate: Operating Expenses/Net Sales # * X12 - Research and development expense rate: (Research and Development Expenses)/Net Sales # * X13 - Cash flow rate: Cash Flow from Operating/Current Liabilities # * X14 - Interest-bearing debt interest rate: Interest-bearing Debt/Equity # * X15 - Tax rate (A): Effective Tax Rate # * X16 - Net Value Per Share (B): Book Value Per Share(B) # * X17 - Net Value Per Share (A): Book Value Per Share(A) # * X18 - Net Value Per Share (C): Book Value Per Share(C) # * X19 - Persistent EPS in the Last Four Seasons: EPS-Net Income # * X20 - Cash Flow Per Share # * X21 - Revenue Per Share (Yuan ¥): Sales Per Share # * X22 - Operating Profit Per Share (Yuan ¥): Operating Income Per Share # * X23 - Per Share Net profit before tax (Yuan ¥): Pretax Income Per Share # * X24 - Realized Sales Gross Profit Growth Rate # * X25 - Operating Profit Growth Rate: Operating Income Growth # * X26 - After-tax Net Profit Growth Rate: Net Income Growth # * X27 - Regular Net Profit Growth Rate: Continuing Operating Income after Tax Growth # * X28 - Continuous Net Profit Growth Rate: Net Income-Excluding Disposal Gain or Loss Growth # * X29 - Total Asset Growth Rate: Total Asset Growth # * X30 - Net Value Growth Rate: Total Equity Growth # * X31 - Total Asset Return Growth Rate Ratio: Return on Total Asset Growth # * X32 - Cash Reinvestment %: Cash Reinvestment Ratio # * X33 - Current Ratio # * X34 - Quick Ratio: Acid Test # * X35 - Interest Expense Ratio: Interest Expenses/Total Revenue # * X36 - Total debt/Total net worth: Total Liability/Equity Ratio # * X37 - Debt ratio %: Liability/Total Assets # * X38 - Net worth/Assets: Equity/Total Assets # * X39 - Long-term fund suitability ratio (A): (Long-term Liability+Equity)/Fixed Assets # * X40 - Borrowing dependency: Cost of Interest-bearing Debt # * X41 - Contingent liabilities/Net worth: Contingent Liability/Equity # * X42 - Operating profit/Paid-in capital: Operating Income/Capital # * X43 - Net profit before tax/Paid-in capital: Pretax Income/Capital # * X44 - Inventory and accounts receivable/Net value: (Inventory+Accounts Receivables)/Equity # * X45 - Total Asset Turnover # * X46 - Accounts Receivable Turnover # * X47 - Average Collection Days: Days Receivable Outstanding # * X48 - Inventory Turnover Rate (times) # * X49 - Fixed Assets Turnover Frequency # * X50 - Net Worth Turnover Rate (times): Equity Turnover # * X51 - Revenue per person: Sales Per Employee # * X52 - Operating profit per person: Operation Income Per Employee # * X53 - Allocation rate per person: Fixed Assets Per 
Employee # * X54 - Working Capital to Total Assets # * X55 - Quick Assets/Total Assets # * X56 - Current Assets/Total Assets # * X57 - Cash/Total Assets # * X58 - Quick Assets/Current Liability # * X59 - Cash/Current Liability # * X60 - Current Liability to Assets # * X61 - Operating Funds to Liability # * X62 - Inventory/Working Capital # * X63 - Inventory/Current Liability # * X64 - Current Liabilities/Liability # * X65 - Working Capital/Equity # * X66 - Current Liabilities/Equity # * X67 - Long-term Liability to Current Assets # * X68 - Retained Earnings to Total Assets # * X69 - Total income/Total expense # * X70 - Total expense/Assets # * X71 - Current Asset Turnover Rate: Current Assets to Sales # * X72 - Quick Asset Turnover Rate: Quick Assets to Sales # * X73 - Working capitcal Turnover Rate: Working Capital to Sales # * X74 - Cash Turnover Rate: Cash to Sales # * X75 - Cash Flow to Sales # * X76 - Fixed Assets to Assets # * X77 - Current Liability to Liability # * X78 - Current Liability to Equity # * X79 - Equity to Long-term Liability # * X80 - Cash Flow to Total Assets # * X81 - Cash Flow to Liability # * X82 - CFO to Assets # * X83 - Cash Flow to Equity # * X84 - Current Liability to Current Assets # * X85 - Liability-Assets Flag: 1 if Total Liability exceeds Total Assets, 0 otherwise # * X86 - Net Income to Total Assets # * X87 - Total assets to GNP price # * X88 - No-credit Interval # * X89 - Gross Profit to Sales # * X90 - Net Income to Stockholder's Equity # * X91 - Liability to Equity # * X92 - Degree of Financial Leverage (DFL) # * X93 - Interest Coverage Ratio (Interest expense to EBIT) # * X94 - Net Income Flag: 1 if Net Income is Negative for the last two years, 0 otherwise # * X95 - Equity to Liability # Importing libraries required import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # import dataprep.eda as eda # EDA, Cleaning import seaborn as sns # Data visualization import matplotlib.pyplot as plt # Data visualization # Data analysis and ML Library from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.linear_model import Lasso # New libraries are installed from here # !pip install dataprep # Files available import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Functions # # Let's take a look at the data company_df = pd.read_csv("/kaggle/input/company-bankruptcy-prediction/data.csv") print( "Number of Records : ", company_df.shape[0], "\nNumber of Features : ", company_df.shape[1], ) company_df.columns = company_df.columns.str.strip() company_df.columns company_df.head() company_df.describe() company_df.info() # **Observations :** # * All the given features are numeric *(int64 or float64)* # * The columns *Net Income Flag* and *Liability-Assets Flag* look like categorical columns # * There are no missing values # Let's check for the presence of missing values missing_value_count = pd.DataFrame(company_df.isna().sum()) missing_value_count.columns = ["Count"] print( "Total number of columns with missing values :", len(missing_value_count[missing_value_count.Count > 0]), ) # ### Categorical column distribution pd.DataFrame(company_df["Net Income Flag"].value_counts()).plot.bar( y="Net Income Flag", rot=0 ) plt.title("Net income flag distribution") plt.show() print("Net Income Flag Distribution\n") print(pd.DataFrame(company_df["Net Income Flag"].value_counts())) # Observations : # pd.DataFrame(company_df["Liability-Assets Flag"].value_counts()).plot.bar( y="Liability-Assets Flag", rot=0 ) plt.title("Liability-Assets Flag distribution") plt.show() print("Liability-Assets Flag Distribution\n") print(pd.DataFrame(company_df["Liability-Assets Flag"].value_counts())) pd.DataFrame(company_df["Bankrupt?"].value_counts()).plot.bar(y="Bankrupt?", rot=0) plt.title("Bankrupt distribution") plt.show() print("Bankrupt Distribution\n") print(pd.DataFrame(company_df["Bankrupt?"].value_counts())) # ### Too many columns to perform EDA # The bankruptcy data we have contains 90+ columns. Carrying out EDA/modelling on 90+ columns is a time- and resource-consuming process. # Possible ways to reduce the column count : # 1. **Manual column selection** - Based on domain knowledge # 2. **Feature selection methods** - Filter methods, wrapper methods and embedded methods [(Click here)](https://www.analyticsvidhya.com/blog/2016/12/introduction-to-feature-selection-methods-with-an-example-or-how-to-select-the-right-variables/#h2_3) # 3. **Dimensionality reduction** - Mapping high-dimensional data to a low-dimensional representation [(Click here)](https://machinelearningmastery.com/dimensionality-reduction-algorithms-with-python/) # As I have limited knowledge in this domain 🤓, let's start selecting features using *feature selection methods* # Reference : # * [sklearn - Feature selection module](https://scikit-learn.org/stable/modules/feature_selection.html) # ## Feature selection # ### Filter methods company_corr = pd.DataFrame(company_df.corr(numeric_only=True)) company_corr = pd.DataFrame(company_corr["Bankrupt?"]) # Remove specific indices indices_to_remove = ["Liability-Assets Flag", "Net Income Flag", "Bankrupt?"] company_corr = company_corr.drop(indices_to_remove) plt.figure(figsize=(8, 17)) sns.barplot(y=company_corr.index, x=company_corr["Bankrupt?"]) plt.title("Pearson correlation with Bankruptcy") plt.show() # Let's see which features have at least a weak correlation with bankruptcy (|r| > 0.15) company_corr[(company_corr["Bankrupt?"] < -0.15) | (company_corr["Bankrupt?"] > 0.15)] # Take the features above and examine their correlations with each other correlated_features = list( company_corr[ (company_corr["Bankrupt?"] < -0.15) | (company_corr["Bankrupt?"] > 0.15) ].index ) + ["Bankrupt?"] corr_test = company_df[correlated_features] plt.figure(figsize=(14, 14)) corr = corr_test.corr() sns.heatmap(corr, cmap="crest", annot=True, fmt=".1f") plt.show() # Observations : # * Let's drop features whose correlation with another feature is 1.0 # Features selected from filter methods selected_features_set1 = [ "ROA(A) before interest and % after tax", "Net Value Per Share (A)", "Debt ratio %", "Working Capital to Total Assets", "Current Liability to Current Assets", "Net Income to Stockholder's Equity", "Bankrupt?", ] sns.heatmap( company_df[selected_features_set1].corr(), cmap="crest", annot=True, fmt=".1f" ) plt.show() # Features selected from **Filter methods** : # 1. ROA(A) before interest and % after tax # 2. Net Value Per Share (A) # 3. Debt ratio % # 4. Working Capital to Total Assets # 5. Current Liability to Current Assets # 6. Net Income to Stockholder's Equity # Let's try selecting features with **Embedded methods ([Lasso Regression](https://towardsdatascience.com/feature-selection-in-machine-learning-using-lasso-regression-7809c7c2771a), and a Random forest method)**; after that we will finalize the features # ### Embedded Methods X = company_df[company_df.columns[1:]] y = company_df[company_df.columns[0]] # #### Lasso Regression # Lasso drives uninformative coefficients to exactly zero; the surviving features form the selected set lasso_reg_model = Lasso(fit_intercept=True, positive=True, random_state=42) lasso_reg_model.fit(X, y) lasso_reg_model coefficients = lasso_reg_model.coef_ features = lasso_reg_model.feature_names_in_ selected_features_set2 = features[coefficients != 0] selected_features_set2
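# A sketch of the other embedded method mentioned above, a random-forest importance ranking (assumptions: X and y as defined in this notebook; n_estimators and the top-10 cutoff are illustrative choices, not tuned values).
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

rf_model = RandomForestClassifier(n_estimators=200, random_state=42, n_jobs=-1)
rf_model.fit(X, y)
rf_importances = pd.Series(rf_model.feature_importances_, index=X.columns)
# keep the strongest predictors for comparison with the Lasso selection above
selected_features_set3 = rf_importances.sort_values(ascending=False).head(10).index.tolist()
selected_features_set3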
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667665.ipynb
company-bankruptcy-prediction
fedesoriano
[{"Id": 129667665, "ScriptId": 38228088, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6304122, "CreationDate": "05/15/2023 15:41:01", "VersionNumber": 2.0, "Title": "Bankruptcy - EDA & Modelling", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 304.0, "LinesInsertedFromPrevious": 53.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 251.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185977277, "KernelVersionId": 129667665, "SourceDatasetVersionId": 1938459}]
[{"Id": 1938459, "DatasetId": 1111894, "DatasourceVersionId": 1977115, "CreatorUserId": 6402661, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "02/13/2021 19:36:03", "VersionNumber": 2.0, "Title": "Company Bankruptcy Prediction", "Slug": "company-bankruptcy-prediction", "Subtitle": "Bankruptcy data from the Taiwan Economic Journal for the years 1999\u20132009", "Description": "### Similar Datasets\n\n- The Boston House-Price Data: [LINK](https://www.kaggle.com/fedesoriano/the-boston-houseprice-data)\n- Gender Pay Gap Dataset: [LINK](https://www.kaggle.com/fedesoriano/gender-pay-gap-dataset)\n- Spanish Wine Quality Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/spanish-wine-quality-dataset)\n\n\n### Context\n\nThe data were collected from the Taiwan Economic Journal for the years 1999 to 2009. Company bankruptcy was defined based on the business regulations of the Taiwan Stock Exchange.\n\n\n### Attribute Information\n\n**Version 2:** Updated column names and description to make the data easier to understand (Y = Output feature, X = Input features)\n\n\nY - Bankrupt?: Class label\nX1 - ROA(C) before interest and depreciation before interest: Return On Total Assets(C)\nX2 - ROA(A) before interest and % after tax: Return On Total Assets(A)\nX3 - ROA(B) before interest and depreciation after tax: Return On Total Assets(B)\nX4 - Operating Gross Margin: Gross Profit/Net Sales\nX5 - Realized Sales Gross Margin: Realized Gross Profit/Net Sales\nX6 - Operating Profit Rate: Operating Income/Net Sales\nX7 - Pre-tax net Interest Rate: Pre-Tax Income/Net Sales\nX8 - After-tax net Interest Rate: Net Income/Net Sales\nX9 - Non-industry income and expenditure/revenue: Net Non-operating Income Ratio\nX10 - Continuous interest rate (after tax): Net Income-Exclude Disposal Gain or Loss/Net Sales\nX11 - Operating Expense Rate: Operating Expenses/Net Sales\nX12 - Research and development expense rate: (Research and Development Expenses)/Net Sales\nX13 - Cash flow rate: Cash Flow from Operating/Current Liabilities\nX14 - Interest-bearing debt interest rate: Interest-bearing Debt/Equity\nX15 - Tax rate (A): Effective Tax Rate\nX16 - Net Value Per Share (B): Book Value Per Share(B)\nX17 - Net Value Per Share (A): Book Value Per Share(A)\nX18 - Net Value Per Share (C): Book Value Per Share(C)\nX19 - Persistent EPS in the Last Four Seasons: EPS-Net Income\nX20 - Cash Flow Per Share\nX21 - Revenue Per Share (Yuan \u00a5): Sales Per Share\nX22 - Operating Profit Per Share (Yuan \u00a5): Operating Income Per Share\nX23 - Per Share Net profit before tax (Yuan \u00a5): Pretax Income Per Share\nX24 - Realized Sales Gross Profit Growth Rate\nX25 - Operating Profit Growth Rate: Operating Income Growth\nX26 - After-tax Net Profit Growth Rate: Net Income Growth\nX27 - Regular Net Profit Growth Rate: Continuing Operating Income after Tax Growth\nX28 - Continuous Net Profit Growth Rate: Net Income-Excluding Disposal Gain or Loss Growth\nX29 - Total Asset Growth Rate: Total Asset Growth\nX30 - Net Value Growth Rate: Total Equity Growth\nX31 - Total Asset Return Growth Rate Ratio: Return on Total Asset Growth\nX32 - Cash Reinvestment %: Cash Reinvestment Ratio\nX33 - Current Ratio\nX34 - Quick Ratio: Acid Test\nX35 - Interest Expense Ratio: Interest Expenses/Total Revenue\nX36 - Total debt/Total net worth: Total Liability/Equity Ratio\nX37 - Debt ratio %: Liability/Total Assets\nX38 - Net worth/Assets: Equity/Total Assets\nX39 - Long-term fund suitability ratio (A): (Long-term 
Liability+Equity)/Fixed Assets\nX40 - Borrowing dependency: Cost of Interest-bearing Debt\nX41 - Contingent liabilities/Net worth: Contingent Liability/Equity\nX42 - Operating profit/Paid-in capital: Operating Income/Capital\nX43 - Net profit before tax/Paid-in capital: Pretax Income/Capital\nX44 - Inventory and accounts receivable/Net value: (Inventory+Accounts Receivables)/Equity\nX45 - Total Asset Turnover\nX46 - Accounts Receivable Turnover\nX47 - Average Collection Days: Days Receivable Outstanding\nX48 - Inventory Turnover Rate (times)\nX49 - Fixed Assets Turnover Frequency\nX50 - Net Worth Turnover Rate (times): Equity Turnover\nX51 - Revenue per person: Sales Per Employee\nX52 - Operating profit per person: Operation Income Per Employee\nX53 - Allocation rate per person: Fixed Assets Per Employee\nX54 - Working Capital to Total Assets\nX55 - Quick Assets/Total Assets\nX56 - Current Assets/Total Assets\nX57 - Cash/Total Assets\nX58 - Quick Assets/Current Liability\nX59 - Cash/Current Liability\nX60 - Current Liability to Assets\nX61 - Operating Funds to Liability\nX62 - Inventory/Working Capital\nX63 - Inventory/Current Liability\nX64 - Current Liabilities/Liability\nX65 - Working Capital/Equity\nX66 - Current Liabilities/Equity\nX67 - Long-term Liability to Current Assets\nX68 - Retained Earnings to Total Assets\nX69 - Total income/Total expense\nX70 - Total expense/Assets\nX71 - Current Asset Turnover Rate: Current Assets to Sales\nX72 - Quick Asset Turnover Rate: Quick Assets to Sales\nX73 - Working capitcal Turnover Rate: Working Capital to Sales\nX74 - Cash Turnover Rate: Cash to Sales\nX75 - Cash Flow to Sales\nX76 - Fixed Assets to Assets\nX77 - Current Liability to Liability\nX78 - Current Liability to Equity\nX79 - Equity to Long-term Liability\nX80 - Cash Flow to Total Assets\nX81 - Cash Flow to Liability\nX82 - CFO to Assets\nX83 - Cash Flow to Equity\nX84 - Current Liability to Current Assets\nX85 - Liability-Assets Flag: 1 if Total Liability exceeds Total Assets, 0 otherwise\nX86 - Net Income to Total Assets\nX87 - Total assets to GNP price\nX88 - No-credit Interval\nX89 - Gross Profit to Sales\nX90 - Net Income to Stockholder's Equity\nX91 - Liability to Equity\nX92 - Degree of Financial Leverage (DFL)\nX93 - Interest Coverage Ratio (Interest expense to EBIT)\nX94 - Net Income Flag: 1 if Net Income is Negative for the last two years, 0 otherwise\nX95 - Equity to Liability\n\n\n### Source\n\nDeron Liang and Chih-Fong Tsai, deronliang '@' gmail.com; cftsai '@' mgt.ncu.edu.tw, National Central University, Taiwan\nThe data was obtained from UCI Machine Learning Repository: [https://archive.ics.uci.edu/ml/datasets/Taiwanese+Bankruptcy+Prediction](https://archive.ics.uci.edu/ml/datasets/Taiwanese+Bankruptcy+Prediction)\n\n\n### Relevant Papers\n\nLiang, D., Lu, C.-C., Tsai, C.-F., and Shih, G.-A. (2016) Financial Ratios and Corporate Governance Indicators in Bankruptcy Prediction: A Comprehensive Study. European Journal of Operational Research, vol. 252, no. 2, pp. 561-572.\n[https://www.sciencedirect.com/science/article/pii/S0377221716000412](https://www.sciencedirect.com/science/article/pii/S0377221716000412)", "VersionNotes": "Updated column descriptions", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1111894, "CreatorUserId": 6402661, "OwnerUserId": 6402661.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1938459.0, "CurrentDatasourceVersionId": 1977115.0, "ForumId": 1129209, "Type": 2, "CreationDate": "01/22/2021 08:05:03", "LastActivityDate": "01/22/2021", "TotalViews": 312149, "TotalDownloads": 32190, "TotalVotes": 610, "TotalKernels": 169}]
[{"Id": 6402661, "UserName": "fedesoriano", "DisplayName": "fedesoriano", "RegisterDate": "12/18/2020", "PerformanceTier": 4}]
# # Company Bankruptcy
# * Company bankruptcy occurs when a company cannot pay its debts and obligations to creditors, resulting in the company's assets being liquidated to repay those debts.
# * This can lead to the company ceasing operations and potentially going out of business.
# ## Why it is important to study bankruptcy
# * Studying bankruptcy is important because it is a significant economic event that affects individuals, businesses, and society as a whole.
# * Understanding bankruptcy can help make informed financial decisions and promote economic stability.
# ## About data source - Taiwan Economic Journal
# The Taiwan Economic Journal is a prominent economics-focused media outlet based in Taiwan.
# They cover a range of topics related to the Taiwanese and global economies, including:
# * finance and business
# * trade and investment
# ## Attribute information
# * Y - Bankrupt?: Class label
# * X1 - ROA(C) before interest and depreciation before interest: Return On Total Assets(C)
# * X2 - ROA(A) before interest and % after tax: Return On Total Assets(A)
# * X3 - ROA(B) before interest and depreciation after tax: Return On Total Assets(B)
# * X4 - Operating Gross Margin: Gross Profit/Net Sales
# * X5 - Realized Sales Gross Margin: Realized Gross Profit/Net Sales
# * X6 - Operating Profit Rate: Operating Income/Net Sales
# * X7 - Pre-tax net Interest Rate: Pre-Tax Income/Net Sales
# * X8 - After-tax net Interest Rate: Net Income/Net Sales
# * X9 - Non-industry income and expenditure/revenue: Net Non-operating Income Ratio
# * X10 - Continuous interest rate (after tax): Net Income-Exclude Disposal Gain or Loss/Net Sales
# * X11 - Operating Expense Rate: Operating Expenses/Net Sales
# * X12 - Research and development expense rate: (Research and Development Expenses)/Net Sales
# * X13 - Cash flow rate: Cash Flow from Operating/Current Liabilities
# * X14 - Interest-bearing debt interest rate: Interest-bearing Debt/Equity
# * X15 - Tax rate (A): Effective Tax Rate
# * X16 - Net Value Per Share (B): Book Value Per Share(B)
# * X17 - Net Value Per Share (A): Book Value Per Share(A)
# * X18 - Net Value Per Share (C): Book Value Per Share(C)
# * X19 - Persistent EPS in the Last Four Seasons: EPS-Net Income
# * X20 - Cash Flow Per Share
# * X21 - Revenue Per Share (Yuan ¥): Sales Per Share
# * X22 - Operating Profit Per Share (Yuan ¥): Operating Income Per Share
# * X23 - Per Share Net profit before tax (Yuan ¥): Pretax Income Per Share
# * X24 - Realized Sales Gross Profit Growth Rate
# * X25 - Operating Profit Growth Rate: Operating Income Growth
# * X26 - After-tax Net Profit Growth Rate: Net Income Growth
# * X27 - Regular Net Profit Growth Rate: Continuing Operating Income after Tax Growth
# * X28 - Continuous Net Profit Growth Rate: Net Income-Excluding Disposal Gain or Loss Growth
# * X29 - Total Asset Growth Rate: Total Asset Growth
# * X30 - Net Value Growth Rate: Total Equity Growth
# * X31 - Total Asset Return Growth Rate Ratio: Return on Total Asset Growth
# * X32 - Cash Reinvestment %: Cash Reinvestment Ratio
# * X33 - Current Ratio
# * X34 - Quick Ratio: Acid Test
# * X35 - Interest Expense Ratio: Interest Expenses/Total Revenue
# * X36 - Total debt/Total net worth: Total Liability/Equity Ratio
# * X37 - Debt ratio %: Liability/Total Assets
# * X38 - Net worth/Assets: Equity/Total Assets
# * X39 - Long-term fund suitability ratio (A): (Long-term Liability+Equity)/Fixed Assets
# * X40 - Borrowing dependency: Cost of Interest-bearing Debt
# * X41 - Contingent liabilities/Net
worth: Contingent Liability/Equity # * X42 - Operating profit/Paid-in capital: Operating Income/Capital # * X43 - Net profit before tax/Paid-in capital: Pretax Income/Capital # * X44 - Inventory and accounts receivable/Net value: (Inventory+Accounts Receivables)/Equity # * X45 - Total Asset Turnover # * X46 - Accounts Receivable Turnover # * X47 - Average Collection Days: Days Receivable Outstanding # * X48 - Inventory Turnover Rate (times) # * X49 - Fixed Assets Turnover Frequency # * X50 - Net Worth Turnover Rate (times): Equity Turnover # * X51 - Revenue per person: Sales Per Employee # * X52 - Operating profit per person: Operation Income Per Employee # * X53 - Allocation rate per person: Fixed Assets Per Employee # * X54 - Working Capital to Total Assets # * X55 - Quick Assets/Total Assets # * X56 - Current Assets/Total Assets # * X57 - Cash/Total Assets # * X58 - Quick Assets/Current Liability # * X59 - Cash/Current Liability # * X60 - Current Liability to Assets # * X61 - Operating Funds to Liability # * X62 - Inventory/Working Capital # * X63 - Inventory/Current Liability # * X64 - Current Liabilities/Liability # * X65 - Working Capital/Equity # * X66 - Current Liabilities/Equity # * X67 - Long-term Liability to Current Assets # * X68 - Retained Earnings to Total Assets # * X69 - Total income/Total expense # * X70 - Total expense/Assets # * X71 - Current Asset Turnover Rate: Current Assets to Sales # * X72 - Quick Asset Turnover Rate: Quick Assets to Sales # * X73 - Working capitcal Turnover Rate: Working Capital to Sales # * X74 - Cash Turnover Rate: Cash to Sales # * X75 - Cash Flow to Sales # * X76 - Fixed Assets to Assets # * X77 - Current Liability to Liability # * X78 - Current Liability to Equity # * X79 - Equity to Long-term Liability # * X80 - Cash Flow to Total Assets # * X81 - Cash Flow to Liability # * X82 - CFO to Assets # * X83 - Cash Flow to Equity # * X84 - Current Liability to Current Assets # * X85 - Liability-Assets Flag: 1 if Total Liability exceeds Total Assets, 0 otherwise # * X86 - Net Income to Total Assets # * X87 - Total assets to GNP price # * X88 - No-credit Interval # * X89 - Gross Profit to Sales # * X90 - Net Income to Stockholder's Equity # * X91 - Liability to Equity # * X92 - Degree of Financial Leverage (DFL) # * X93 - Interest Coverage Ratio (Interest expense to EBIT) # * X94 - Net Income Flag: 1 if Net Income is Negative for the last two years, 0 otherwise # * X95 - Equity to Liability # Importing libraries required import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)
# import dataprep.eda as eda # EDA, Cleaning
import seaborn as sns  # Data visualization
import matplotlib.pyplot as plt  # Data visualization

# Data analysis and ML Library
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import Lasso

# New libraries are installed from here
# !pip install dataprep
# Files available
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Functions
# # Let's take a look at the data
company_df = pd.read_csv("/kaggle/input/company-bankruptcy-prediction/data.csv")
print(
    "Number of Records : ",
    company_df.shape[0],
    "\nNumber of Features : ",
    company_df.shape[1],
)
company_df.columns = company_df.columns.str.strip()
company_df.columns
company_df.head()
company_df.describe()
company_df.info()
# **Observations :**
# * All the given features are numeric *(int64 or float64)*
# * The columns *Net Income Flag* and *Liability-Assets Flag* look like categorical columns
# * There are no missing values
# Let's check for the presence of missing values
missing_value_count = pd.DataFrame(company_df.isna().sum())
missing_value_count.columns = ["Count"]
print(
    "Total number of columns with missing values :",
    len(missing_value_count[missing_value_count.Count > 0]),
)
# ### Categorical column distribution
pd.DataFrame(company_df["Net Income Flag"].value_counts()).plot.bar(
    y="Net Income Flag", rot=0
)
plt.title("Net income flag distribution")
plt.show()
print("Net Income Flag Distribution\n")
print(pd.DataFrame(company_df["Net Income Flag"].value_counts()))
# Observations :
# * *Net Income Flag* is 1 for every record, so this constant column carries no information for modelling
pd.DataFrame(company_df["Liability-Assets Flag"].value_counts()).plot.bar(
    y="Liability-Assets Flag", rot=0
)
plt.title("Liability-Assets Flag distribution")
plt.show()
print("Liability-Assets Flag Distribution\n")
print(pd.DataFrame(company_df["Liability-Assets Flag"].value_counts()))
pd.DataFrame(company_df["Bankrupt?"].value_counts()).plot.bar(y="Bankrupt?", rot=0)
plt.title("Bankrupt distribution")
plt.show()
print("Bankrupt Distribution\n")
print(pd.DataFrame(company_df["Bankrupt?"].value_counts()))
# ### Too many columns to perform EDA
# The bankruptcy data we have contains 90+ columns. Carrying out EDA/modelling on 90+ columns is a time- and resource-consuming process.
# Possible approaches to reduce the column count:
# 1. **Manual column selection** - Based on domain knowledge
# 2. **Feature selection methods** - Filter methods, wrapper methods and embedded methods [(Click here)](https://www.analyticsvidhya.com/blog/2016/12/introduction-to-feature-selection-methods-with-an-example-or-how-to-select-the-right-variables/#h2_3)
# 3. **Dimensionality reduction** - Mapping high-dimensional data to a low-dimensional representation [(Click here)](https://machinelearningmastery.com/dimensionality-reduction-algorithms-with-python/)
# As I have limited knowledge in this domain 🤓, let's start selecting features using *feature selection methods*
# Reference :
# * [sklearn - Feature selection module](https://scikit-learn.org/stable/modules/feature_selection.html)
# ## Feature selection
# ### Filter methods
company_corr = pd.DataFrame(company_df.corr(numeric_only=True))
company_corr = pd.DataFrame(company_corr["Bankrupt?"])

# Remove the two flag columns and the target itself
indices_to_remove = ["Liability-Assets Flag", "Net Income Flag", "Bankrupt?"]
company_corr = company_corr.drop(indices_to_remove)
plt.figure(figsize=(8, 17))
sns.barplot(y=company_corr.index, x=company_corr["Bankrupt?"])
plt.title("Pearson correlation with Bankruptcy")
plt.show()
# Let's see which features have at least a weak correlation with the target (|correlation| > 0.15)
company_corr[(company_corr["Bankrupt?"] < -0.15) | (company_corr["Bankrupt?"] > 0.15)]
# Select the above-mentioned features and examine their correlation with each other
correlated_features = list(
    company_corr[
        (company_corr["Bankrupt?"] < -0.15) | (company_corr["Bankrupt?"] > 0.15)
    ].index
) + ["Bankrupt?"]
corr_test = company_df[correlated_features]
plt.figure(figsize=(14, 14))
corr = corr_test.corr()
sns.heatmap(corr, cmap="crest", annot=True, fmt=".1f")
plt.show()
# Observations :
# * Several feature pairs are perfectly correlated (1.0) with each other; let's keep only one feature from each such pair

# Features selected from Filter methods
selected_features_set1 = [
    "ROA(A) before interest and % after tax",
    "Net Value Per Share (A)",
    "Debt ratio %",
    "Working Capital to Total Assets",
    "Current Liability to Current Assets",
    "Net Income to Stockholder's Equity",
    "Bankrupt?",
]
sns.heatmap(
    company_df[selected_features_set1].corr(), cmap="crest", annot=True, fmt=".1f"
)
plt.show()
# Features selected from **Filter methods** :
# 1. ROA(A) before interest and % after tax
# 2. Net Value Per Share (A)
# 3. Debt ratio %
# 4. Working Capital to Total Assets
# 5. Current Liability to Current Assets
# 6. Net Income to Stockholder's Equity
# Let's try selecting features with **embedded methods ([Lasso regression](https://towardsdatascience.com/feature-selection-in-machine-learning-using-lasso-regression-7809c7c2771a) and the random forest method)**; after that we will finalize the features
# ### Embedded Methods
X = company_df[company_df.columns[1:]]
y = company_df[company_df.columns[0]]
# #### Lasso Regression
lasso_reg_model = Lasso(fit_intercept=True, positive=True, random_state=42)
lasso_reg_model.fit(X, y)
lasso_reg_model

# Keep only the features whose Lasso coefficient is non-zero
coefficients = lasso_reg_model.coef_
features = lasso_reg_model.feature_names_in_
selected_features_set2 = features[coefficients != 0]
selected_features_set2
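# #### Tuning Lasso in a scaled Pipeline (illustrative sketch)
# StandardScaler, Pipeline, train_test_split and GridSearchCV are imported at
# the top but never used. The sketch below is one assumed way they could fit
# together, not the notebook's own next step: hold out a stratified test split
# (only ~3% of records are bankrupt), scale the ratios so the Lasso penalty
# treats them comparably, and tune alpha over an illustrative grid.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)

# Scaling first keeps the L1 penalty comparable across features
lasso_pipe = Pipeline(
    [("scaler", StandardScaler()), ("lasso", Lasso(random_state=42))]
)

# The alpha grid is a guess; widen or narrow it based on the CV scores
param_grid = {"lasso__alpha": [0.0001, 0.001, 0.01, 0.1]}
grid_search = GridSearchCV(
    lasso_pipe, param_grid, cv=5, scoring="neg_mean_squared_error"
)
grid_search.fit(X_train, y_train)
grid_search.best_params_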
[{"company-bankruptcy-prediction/data.csv": {"column_names": "[\"Bankrupt?\", \" ROA(C) before interest and depreciation before interest\", \" ROA(A) before interest and % after tax\", \" ROA(B) before interest and depreciation after tax\", \" Operating Gross Margin\", \" Realized Sales Gross Margin\", \" Operating Profit Rate\", \" Pre-tax net Interest Rate\", \" After-tax net Interest Rate\", \" Non-industry income and expenditure/revenue\", \" Continuous interest rate (after tax)\", \" Operating Expense Rate\", \" Research and development expense rate\", \" Cash flow rate\", \" Interest-bearing debt interest rate\", \" Tax rate (A)\", \" Net Value Per Share (B)\", \" Net Value Per Share (A)\", \" Net Value Per Share (C)\", \" Persistent EPS in the Last Four Seasons\", \" Cash Flow Per Share\", \" Revenue Per Share (Yuan \\u00a5)\", \" Operating Profit Per Share (Yuan \\u00a5)\", \" Per Share Net profit before tax (Yuan \\u00a5)\", \" Realized Sales Gross Profit Growth Rate\", \" Operating Profit Growth Rate\", \" After-tax Net Profit Growth Rate\", \" Regular Net Profit Growth Rate\", \" Continuous Net Profit Growth Rate\", \" Total Asset Growth Rate\", \" Net Value Growth Rate\", \" Total Asset Return Growth Rate Ratio\", \" Cash Reinvestment %\", \" Current Ratio\", \" Quick Ratio\", \" Interest Expense Ratio\", \" Total debt/Total net worth\", \" Debt ratio %\", \" Net worth/Assets\", \" Long-term fund suitability ratio (A)\", \" Borrowing dependency\", \" Contingent liabilities/Net worth\", \" Operating profit/Paid-in capital\", \" Net profit before tax/Paid-in capital\", \" Inventory and accounts receivable/Net value\", \" Total Asset Turnover\", \" Accounts Receivable Turnover\", \" Average Collection Days\", \" Inventory Turnover Rate (times)\", \" Fixed Assets Turnover Frequency\", \" Net Worth Turnover Rate (times)\", \" Revenue per person\", \" Operating profit per person\", \" Allocation rate per person\", \" Working Capital to Total Assets\", \" Quick Assets/Total Assets\", \" Current Assets/Total Assets\", \" Cash/Total Assets\", \" Quick Assets/Current Liability\", \" Cash/Current Liability\", \" Current Liability to Assets\", \" Operating Funds to Liability\", \" Inventory/Working Capital\", \" Inventory/Current Liability\", \" Current Liabilities/Liability\", \" Working Capital/Equity\", \" Current Liabilities/Equity\", \" Long-term Liability to Current Assets\", \" Retained Earnings to Total Assets\", \" Total income/Total expense\", \" Total expense/Assets\", \" Current Asset Turnover Rate\", \" Quick Asset Turnover Rate\", \" Working capitcal Turnover Rate\", \" Cash Turnover Rate\", \" Cash Flow to Sales\", \" Fixed Assets to Assets\", \" Current Liability to Liability\", \" Current Liability to Equity\", \" Equity to Long-term Liability\", \" Cash Flow to Total Assets\", \" Cash Flow to Liability\", \" CFO to Assets\", \" Cash Flow to Equity\", \" Current Liability to Current Assets\", \" Liability-Assets Flag\", \" Net Income to Total Assets\", \" Total assets to GNP price\", \" No-credit Interval\", \" Gross Profit to Sales\", \" Net Income to Stockholder's Equity\", \" Liability to Equity\", \" Degree of Financial Leverage (DFL)\", \" Interest Coverage Ratio (Interest expense to EBIT)\", \" Net Income Flag\", \" Equity to Liability\"]", "column_data_types": "{\"Bankrupt?\": \"int64\", \" ROA(C) before interest and depreciation before interest\": \"float64\", \" ROA(A) before interest and % after tax\": \"float64\", \" ROA(B) before interest and depreciation after 
tax\": \"float64\", \" Operating Gross Margin\": \"float64\", \" Realized Sales Gross Margin\": \"float64\", \" Operating Profit Rate\": \"float64\", \" Pre-tax net Interest Rate\": \"float64\", \" After-tax net Interest Rate\": \"float64\", \" Non-industry income and expenditure/revenue\": \"float64\", \" Continuous interest rate (after tax)\": \"float64\", \" Operating Expense Rate\": \"float64\", \" Research and development expense rate\": \"float64\", \" Cash flow rate\": \"float64\", \" Interest-bearing debt interest rate\": \"float64\", \" Tax rate (A)\": \"float64\", \" Net Value Per Share (B)\": \"float64\", \" Net Value Per Share (A)\": \"float64\", \" Net Value Per Share (C)\": \"float64\", \" Persistent EPS in the Last Four Seasons\": \"float64\", \" Cash Flow Per Share\": \"float64\", \" Revenue Per Share (Yuan \\u00a5)\": \"float64\", \" Operating Profit Per Share (Yuan \\u00a5)\": \"float64\", \" Per Share Net profit before tax (Yuan \\u00a5)\": \"float64\", \" Realized Sales Gross Profit Growth Rate\": \"float64\", \" Operating Profit Growth Rate\": \"float64\", \" After-tax Net Profit Growth Rate\": \"float64\", \" Regular Net Profit Growth Rate\": \"float64\", \" Continuous Net Profit Growth Rate\": \"float64\", \" Total Asset Growth Rate\": \"float64\", \" Net Value Growth Rate\": \"float64\", \" Total Asset Return Growth Rate Ratio\": \"float64\", \" Cash Reinvestment %\": \"float64\", \" Current Ratio\": \"float64\", \" Quick Ratio\": \"float64\", \" Interest Expense Ratio\": \"float64\", \" Total debt/Total net worth\": \"float64\", \" Debt ratio %\": \"float64\", \" Net worth/Assets\": \"float64\", \" Long-term fund suitability ratio (A)\": \"float64\", \" Borrowing dependency\": \"float64\", \" Contingent liabilities/Net worth\": \"float64\", \" Operating profit/Paid-in capital\": \"float64\", \" Net profit before tax/Paid-in capital\": \"float64\", \" Inventory and accounts receivable/Net value\": \"float64\", \" Total Asset Turnover\": \"float64\", \" Accounts Receivable Turnover\": \"float64\", \" Average Collection Days\": \"float64\", \" Inventory Turnover Rate (times)\": \"float64\", \" Fixed Assets Turnover Frequency\": \"float64\", \" Net Worth Turnover Rate (times)\": \"float64\", \" Revenue per person\": \"float64\", \" Operating profit per person\": \"float64\", \" Allocation rate per person\": \"float64\", \" Working Capital to Total Assets\": \"float64\", \" Quick Assets/Total Assets\": \"float64\", \" Current Assets/Total Assets\": \"float64\", \" Cash/Total Assets\": \"float64\", \" Quick Assets/Current Liability\": \"float64\", \" Cash/Current Liability\": \"float64\", \" Current Liability to Assets\": \"float64\", \" Operating Funds to Liability\": \"float64\", \" Inventory/Working Capital\": \"float64\", \" Inventory/Current Liability\": \"float64\", \" Current Liabilities/Liability\": \"float64\", \" Working Capital/Equity\": \"float64\", \" Current Liabilities/Equity\": \"float64\", \" Long-term Liability to Current Assets\": \"float64\", \" Retained Earnings to Total Assets\": \"float64\", \" Total income/Total expense\": \"float64\", \" Total expense/Assets\": \"float64\", \" Current Asset Turnover Rate\": \"float64\", \" Quick Asset Turnover Rate\": \"float64\", \" Working capitcal Turnover Rate\": \"float64\", \" Cash Turnover Rate\": \"float64\", \" Cash Flow to Sales\": \"float64\", \" Fixed Assets to Assets\": \"float64\", \" Current Liability to Liability\": \"float64\", \" Current Liability to Equity\": \"float64\", \" Equity to Long-term 
Liability\": \"float64\", \" Cash Flow to Total Assets\": \"float64\", \" Cash Flow to Liability\": \"float64\", \" CFO to Assets\": \"float64\", \" Cash Flow to Equity\": \"float64\", \" Current Liability to Current Assets\": \"float64\", \" Liability-Assets Flag\": \"int64\", \" Net Income to Total Assets\": \"float64\", \" Total assets to GNP price\": \"float64\", \" No-credit Interval\": \"float64\", \" Gross Profit to Sales\": \"float64\", \" Net Income to Stockholder's Equity\": \"float64\", \" Liability to Equity\": \"float64\", \" Degree of Financial Leverage (DFL)\": \"float64\", \" Interest Coverage Ratio (Interest expense to EBIT)\": \"float64\", \" Net Income Flag\": \"int64\", \" Equity to Liability\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6819 entries, 0 to 6818\nData columns (total 96 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Bankrupt? 6819 non-null int64 \n 1 ROA(C) before interest and depreciation before interest 6819 non-null float64\n 2 ROA(A) before interest and % after tax 6819 non-null float64\n 3 ROA(B) before interest and depreciation after tax 6819 non-null float64\n 4 Operating Gross Margin 6819 non-null float64\n 5 Realized Sales Gross Margin 6819 non-null float64\n 6 Operating Profit Rate 6819 non-null float64\n 7 Pre-tax net Interest Rate 6819 non-null float64\n 8 After-tax net Interest Rate 6819 non-null float64\n 9 Non-industry income and expenditure/revenue 6819 non-null float64\n 10 Continuous interest rate (after tax) 6819 non-null float64\n 11 Operating Expense Rate 6819 non-null float64\n 12 Research and development expense rate 6819 non-null float64\n 13 Cash flow rate 6819 non-null float64\n 14 Interest-bearing debt interest rate 6819 non-null float64\n 15 Tax rate (A) 6819 non-null float64\n 16 Net Value Per Share (B) 6819 non-null float64\n 17 Net Value Per Share (A) 6819 non-null float64\n 18 Net Value Per Share (C) 6819 non-null float64\n 19 Persistent EPS in the Last Four Seasons 6819 non-null float64\n 20 Cash Flow Per Share 6819 non-null float64\n 21 Revenue Per Share (Yuan \u00a5) 6819 non-null float64\n 22 Operating Profit Per Share (Yuan \u00a5) 6819 non-null float64\n 23 Per Share Net profit before tax (Yuan \u00a5) 6819 non-null float64\n 24 Realized Sales Gross Profit Growth Rate 6819 non-null float64\n 25 Operating Profit Growth Rate 6819 non-null float64\n 26 After-tax Net Profit Growth Rate 6819 non-null float64\n 27 Regular Net Profit Growth Rate 6819 non-null float64\n 28 Continuous Net Profit Growth Rate 6819 non-null float64\n 29 Total Asset Growth Rate 6819 non-null float64\n 30 Net Value Growth Rate 6819 non-null float64\n 31 Total Asset Return Growth Rate Ratio 6819 non-null float64\n 32 Cash Reinvestment % 6819 non-null float64\n 33 Current Ratio 6819 non-null float64\n 34 Quick Ratio 6819 non-null float64\n 35 Interest Expense Ratio 6819 non-null float64\n 36 Total debt/Total net worth 6819 non-null float64\n 37 Debt ratio % 6819 non-null float64\n 38 Net worth/Assets 6819 non-null float64\n 39 Long-term fund suitability ratio (A) 6819 non-null float64\n 40 Borrowing dependency 6819 non-null float64\n 41 Contingent liabilities/Net worth 6819 non-null float64\n 42 Operating profit/Paid-in capital 6819 non-null float64\n 43 Net profit before tax/Paid-in capital 6819 non-null float64\n 44 Inventory and accounts receivable/Net value 6819 non-null float64\n 45 Total Asset Turnover 6819 non-null float64\n 46 Accounts Receivable Turnover 6819 non-null 
float64\n 47 Average Collection Days 6819 non-null float64\n 48 Inventory Turnover Rate (times) 6819 non-null float64\n 49 Fixed Assets Turnover Frequency 6819 non-null float64\n 50 Net Worth Turnover Rate (times) 6819 non-null float64\n 51 Revenue per person 6819 non-null float64\n 52 Operating profit per person 6819 non-null float64\n 53 Allocation rate per person 6819 non-null float64\n 54 Working Capital to Total Assets 6819 non-null float64\n 55 Quick Assets/Total Assets 6819 non-null float64\n 56 Current Assets/Total Assets 6819 non-null float64\n 57 Cash/Total Assets 6819 non-null float64\n 58 Quick Assets/Current Liability 6819 non-null float64\n 59 Cash/Current Liability 6819 non-null float64\n 60 Current Liability to Assets 6819 non-null float64\n 61 Operating Funds to Liability 6819 non-null float64\n 62 Inventory/Working Capital 6819 non-null float64\n 63 Inventory/Current Liability 6819 non-null float64\n 64 Current Liabilities/Liability 6819 non-null float64\n 65 Working Capital/Equity 6819 non-null float64\n 66 Current Liabilities/Equity 6819 non-null float64\n 67 Long-term Liability to Current Assets 6819 non-null float64\n 68 Retained Earnings to Total Assets 6819 non-null float64\n 69 Total income/Total expense 6819 non-null float64\n 70 Total expense/Assets 6819 non-null float64\n 71 Current Asset Turnover Rate 6819 non-null float64\n 72 Quick Asset Turnover Rate 6819 non-null float64\n 73 Working capitcal Turnover Rate 6819 non-null float64\n 74 Cash Turnover Rate 6819 non-null float64\n 75 Cash Flow to Sales 6819 non-null float64\n 76 Fixed Assets to Assets 6819 non-null float64\n 77 Current Liability to Liability 6819 non-null float64\n 78 Current Liability to Equity 6819 non-null float64\n 79 Equity to Long-term Liability 6819 non-null float64\n 80 Cash Flow to Total Assets 6819 non-null float64\n 81 Cash Flow to Liability 6819 non-null float64\n 82 CFO to Assets 6819 non-null float64\n 83 Cash Flow to Equity 6819 non-null float64\n 84 Current Liability to Current Assets 6819 non-null float64\n 85 Liability-Assets Flag 6819 non-null int64 \n 86 Net Income to Total Assets 6819 non-null float64\n 87 Total assets to GNP price 6819 non-null float64\n 88 No-credit Interval 6819 non-null float64\n 89 Gross Profit to Sales 6819 non-null float64\n 90 Net Income to Stockholder's Equity 6819 non-null float64\n 91 Liability to Equity 6819 non-null float64\n 92 Degree of Financial Leverage (DFL) 6819 non-null float64\n 93 Interest Coverage Ratio (Interest expense to EBIT) 6819 non-null float64\n 94 Net Income Flag 6819 non-null int64 \n 95 Equity to Liability 6819 non-null float64\ndtypes: float64(93), int64(3)\nmemory usage: 5.0 MB\n", "summary": "{\"Bankrupt?\": {\"count\": 6819.0, \"mean\": 0.03226279513125092, \"std\": 0.17671017660774022, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \" ROA(C) before interest and depreciation before interest\": {\"count\": 6819.0, \"mean\": 0.5051796332417815, \"std\": 0.06068563875428444, \"min\": 0.0, \"25%\": 0.476527080388047, \"50%\": 0.502705601325988, \"75%\": 0.535562813825379, \"max\": 1.0}, \" ROA(A) before interest and % after tax\": {\"count\": 6819.0, \"mean\": 0.5586249158750463, \"std\": 0.06562003103170726, \"min\": 0.0, \"25%\": 0.53554295682512, \"50%\": 0.559801569995639, \"75%\": 0.58915721761884, \"max\": 1.0}, \" ROA(B) before interest and depreciation after tax\": {\"count\": 6819.0, \"mean\": 0.5535887093516657, \"std\": 0.06159480929187566, \"min\": 0.0, \"25%\": 0.527276620804112, \"50%\": 
0.552277959205525, \"75%\": 0.584105144815033, \"max\": 1.0}, \" Operating Gross Margin\": {\"count\": 6819.0, \"mean\": 0.6079480383703836, \"std\": 0.01693381254822146, \"min\": 0.0, \"25%\": 0.6004446590466855, \"50%\": 0.605997492036495, \"75%\": 0.613914152697502, \"max\": 1.0}, \" Realized Sales Gross Margin\": {\"count\": 6819.0, \"mean\": 0.6079294691769791, \"std\": 0.01691607005567578, \"min\": 0.0, \"25%\": 0.600433848859165, \"50%\": 0.605975871661454, \"75%\": 0.6138420847806975, \"max\": 1.0}, \" Operating Profit Rate\": {\"count\": 6819.0, \"mean\": 0.9987551277900442, \"std\": 0.013010025092984125, \"min\": 0.0, \"25%\": 0.998969203197885, \"50%\": 0.999022239374566, \"75%\": 0.999094514164357, \"max\": 1.0}, \" Pre-tax net Interest Rate\": {\"count\": 6819.0, \"mean\": 0.7971897524712905, \"std\": 0.012868988419884597, \"min\": 0.0, \"25%\": 0.797385863236893, \"50%\": 0.797463610578231, \"75%\": 0.797578848185589, \"max\": 1.0}, \" After-tax net Interest Rate\": {\"count\": 6819.0, \"mean\": 0.8090835935135348, \"std\": 0.013600653945149043, \"min\": 0.0, \"25%\": 0.809311597146491, \"50%\": 0.809375198550956, \"75%\": 0.809469266134837, \"max\": 1.0}, \" Non-industry income and expenditure/revenue\": {\"count\": 6819.0, \"mean\": 0.303622923649734, \"std\": 0.011163439838128548, \"min\": 0.0, \"25%\": 0.30346627659685, \"50%\": 0.303525492830123, \"75%\": 0.303585192461218, \"max\": 1.0}, \" Continuous interest rate (after tax)\": {\"count\": 6819.0, \"mean\": 0.7813814325261418, \"std\": 0.012679004028913246, \"min\": 0.0, \"25%\": 0.7815668165898519, \"50%\": 0.781634957112874, \"75%\": 0.7817353784192015, \"max\": 1.0}, \" Operating Expense Rate\": {\"count\": 6819.0, \"mean\": 1995347312.8028853, \"std\": 3237683890.5223837, \"min\": 0.0, \"25%\": 0.0001566874492428, \"50%\": 0.0002777588583625, \"75%\": 4145000000.0, \"max\": 9990000000.0}, \" Research and development expense rate\": {\"count\": 6819.0, \"mean\": 1950427306.0568295, \"std\": 2598291553.9983206, \"min\": 0.0, \"25%\": 0.000128187953762, \"50%\": 509000000.0, \"75%\": 3450000000.0, \"max\": 9980000000.0}, \" Cash flow rate\": {\"count\": 6819.0, \"mean\": 0.4674311857796612, \"std\": 0.017035517308785362, \"min\": 0.0, \"25%\": 0.4615577531181065, \"50%\": 0.465079724549793, \"75%\": 0.471003917029432, \"max\": 1.0}, \" Interest-bearing debt interest rate\": {\"count\": 6819.0, \"mean\": 16448012.905942537, \"std\": 108275033.532824, \"min\": 0.0, \"25%\": 0.0002030203020302, \"50%\": 0.0003210321032103, \"75%\": 0.0005325532553255, \"max\": 990000000.0}, \" Tax rate (A)\": {\"count\": 6819.0, \"mean\": 0.11500074794142427, \"std\": 0.13866749672835132, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0734892195566353, \"75%\": 0.205840672132807, \"max\": 1.0}, \" Net Value Per Share (B)\": {\"count\": 6819.0, \"mean\": 0.19066057949747392, \"std\": 0.033389768351330985, \"min\": 0.0, \"25%\": 0.173612574269942, \"50%\": 0.184400151700308, \"75%\": 0.199570182461759, \"max\": 1.0}, \" Net Value Per Share (A)\": {\"count\": 6819.0, \"mean\": 0.19063317896774643, \"std\": 0.033473514172429, \"min\": 0.0, \"25%\": 0.173612574269942, \"50%\": 0.184400151700308, \"75%\": 0.199570182461759, \"max\": 1.0}, \" Net Value Per Share (C)\": {\"count\": 6819.0, \"mean\": 0.1906723702531618, \"std\": 0.03348013767040907, \"min\": 0.0, \"25%\": 0.1736757827314485, \"50%\": 0.184400151700308, \"75%\": 0.199612321436096, \"max\": 1.0}, \" Persistent EPS in the Last Four Seasons\": {\"count\": 6819.0, \"mean\": 
0.22881285256452782, \"std\": 0.03326261307597689, \"min\": 0.0, \"25%\": 0.214711165736976, \"50%\": 0.22454382149948, \"75%\": 0.2388200813085, \"max\": 1.0}, \" Cash Flow Per Share\": {\"count\": 6819.0, \"mean\": 0.32348191216983224, \"std\": 0.01761091295834377, \"min\": 0.0, \"25%\": 0.317747754120393, \"50%\": 0.322487090613284, \"75%\": 0.3286234703260945, \"max\": 1.0}, \" Revenue Per Share (Yuan \\u00a5)\": {\"count\": 6819.0, \"mean\": 1328640.6020960682, \"std\": 51707089.76790663, \"min\": 0.0, \"25%\": 0.01563138073415305, \"50%\": 0.0273757127516373, \"75%\": 0.0463572152396509, \"max\": 3020000000.0}, \" Operating Profit Per Share (Yuan \\u00a5)\": {\"count\": 6819.0, \"mean\": 0.10909073887546925, \"std\": 0.027942244774416095, \"min\": 0.0, \"25%\": 0.0960833808321798, \"50%\": 0.104226040224737, \"75%\": 0.1161550362348345, \"max\": 1.0}, \" Per Share Net profit before tax (Yuan \\u00a5)\": {\"count\": 6819.0, \"mean\": 0.18436057764203345, \"std\": 0.03318020898090537, \"min\": 0.0, \"25%\": 0.170369812457634, \"50%\": 0.179709271672818, \"75%\": 0.193492505837162, \"max\": 1.0}, \" Realized Sales Gross Profit Growth Rate\": {\"count\": 6819.0, \"mean\": 0.02240785447416587, \"std\": 0.012079270152911608, \"min\": 0.0, \"25%\": 0.022064532735505453, \"50%\": 0.0221023731764072, \"75%\": 0.022153148426612798, \"max\": 1.0}, \" Operating Profit Growth Rate\": {\"count\": 6819.0, \"mean\": 0.8479799951688084, \"std\": 0.010752477405401351, \"min\": 0.0, \"25%\": 0.8479841081819834, \"50%\": 0.848043533745768, \"75%\": 0.8481225403945605, \"max\": 1.0}, \" After-tax Net Profit Growth Rate\": {\"count\": 6819.0, \"mean\": 0.6891461185681318, \"std\": 0.013853022260934765, \"min\": 0.0, \"25%\": 0.6892699337448115, \"50%\": 0.689438526343149, \"75%\": 0.6896471679790515, \"max\": 1.0}, \" Regular Net Profit Growth Rate\": {\"count\": 6819.0, \"mean\": 0.6891500117795616, \"std\": 0.0139102834140106, \"min\": 0.0, \"25%\": 0.689270265563206, \"50%\": 0.689438555196922, \"75%\": 0.6896470092832976, \"max\": 1.0}, \" Continuous Net Profit Growth Rate\": {\"count\": 6819.0, \"mean\": 0.21763901299696697, \"std\": 0.01006296314611611, \"min\": 0.0, \"25%\": 0.2175795122117655, \"50%\": 0.217598046961963, \"75%\": 0.217621501194243, \"max\": 1.0}, \" Total Asset Growth Rate\": {\"count\": 6819.0, \"mean\": 5508096595.248749, \"std\": 2897717771.1697035, \"min\": 0.0, \"25%\": 4860000000.0, \"50%\": 6400000000.0, \"75%\": 7390000000.0, \"max\": 9990000000.0}, \" Net Value Growth Rate\": {\"count\": 6819.0, \"mean\": 1566212.055241067, \"std\": 114159389.51834548, \"min\": 0.0, \"25%\": 0.0004409688868264, \"50%\": 0.0004619555222076, \"75%\": 0.000499362141038, \"max\": 9330000000.0}, \" Total Asset Return Growth Rate Ratio\": {\"count\": 6819.0, \"mean\": 0.2642475118758414, \"std\": 0.009634208862611605, \"min\": 0.0, \"25%\": 0.263758926420651, \"50%\": 0.264049545034229, \"75%\": 0.264388341065032, \"max\": 1.0}, \" Cash Reinvestment %\": {\"count\": 6819.0, \"mean\": 0.37967667232266245, \"std\": 0.020736565809616806, \"min\": 0.0, \"25%\": 0.37474851905666695, \"50%\": 0.380425468499683, \"75%\": 0.386731120301032, \"max\": 1.0}, \" Current Ratio\": {\"count\": 6819.0, \"mean\": 403284.954244977, \"std\": 33302155.825480215, \"min\": 0.0, \"25%\": 0.00755504663011965, \"50%\": 0.0105871744549939, \"75%\": 0.0162695280201934, \"max\": 2750000000.0}, \" Quick Ratio\": {\"count\": 6819.0, \"mean\": 8376594.819684891, \"std\": 244684748.44687235, \"min\": 0.0, \"25%\": 
0.004725903227376101, \"50%\": 0.0074124720675444, \"75%\": 0.01224910697241505, \"max\": 9230000000.0}, \" Interest Expense Ratio\": {\"count\": 6819.0, \"mean\": 0.6309910117124214, \"std\": 0.011238461504050156, \"min\": 0.0, \"25%\": 0.63061225188696, \"50%\": 0.630698209613567, \"75%\": 0.631125258558102, \"max\": 1.0}, \" Total debt/Total net worth\": {\"count\": 6819.0, \"mean\": 4416336.714259365, \"std\": 168406905.28151134, \"min\": 0.0, \"25%\": 0.0030070491250148, \"50%\": 0.005546284390702, \"75%\": 0.00927329266179695, \"max\": 9940000000.0}, \" Debt ratio %\": {\"count\": 6819.0, \"mean\": 0.11317708497306007, \"std\": 0.05392030606308283, \"min\": 0.0, \"25%\": 0.0728905281615624, \"50%\": 0.111406717658796, \"75%\": 0.148804305106267, \"max\": 1.0}, \" Net worth/Assets\": {\"count\": 6819.0, \"mean\": 0.8868229150269401, \"std\": 0.05392030606308284, \"min\": 0.0, \"25%\": 0.8511956948937329, \"50%\": 0.888593282341204, \"75%\": 0.927109471838438, \"max\": 1.0}, \" Long-term fund suitability ratio (A)\": {\"count\": 6819.0, \"mean\": 0.00878273381503679, \"std\": 0.028152926049290605, \"min\": 0.0, \"25%\": 0.0052436836906082, \"50%\": 0.0056646361117639, \"75%\": 0.00684743246553585, \"max\": 1.0}, \" Borrowing dependency\": {\"count\": 6819.0, \"mean\": 0.37465429459872324, \"std\": 0.016286163355500444, \"min\": 0.0, \"25%\": 0.3701678435547765, \"50%\": 0.372624322553083, \"75%\": 0.3762707372009225, \"max\": 1.0}, \" Contingent liabilities/Net worth\": {\"count\": 6819.0, \"mean\": 0.0059682772664790325, \"std\": 0.012188361875857312, \"min\": 0.0, \"25%\": 0.0053658477137564, \"50%\": 0.0053658477137564, \"75%\": 0.00576435604952715, \"max\": 1.0}, \" Operating profit/Paid-in capital\": {\"count\": 6819.0, \"mean\": 0.10897668140338518, \"std\": 0.02778168598564047, \"min\": 0.0, \"25%\": 0.0961046786197013, \"50%\": 0.104133079290635, \"75%\": 0.115927337274252, \"max\": 1.0}, \" Net profit before tax/Paid-in capital\": {\"count\": 6819.0, \"mean\": 0.18271502907673604, \"std\": 0.030784771508309793, \"min\": 0.0, \"25%\": 0.169376366789835, \"50%\": 0.178455621747983, \"75%\": 0.191606967800317, \"max\": 1.0}, \" Inventory and accounts receivable/Net value\": {\"count\": 6819.0, \"mean\": 0.40245933052066923, \"std\": 0.013324079587932275, \"min\": 0.0, \"25%\": 0.3974026791778925, \"50%\": 0.40013102490143, \"75%\": 0.404550770809581, \"max\": 1.0}, \" Total Asset Turnover\": {\"count\": 6819.0, \"mean\": 0.14160561602172958, \"std\": 0.1011449684929233, \"min\": 0.0, \"25%\": 0.0764617691154423, \"50%\": 0.118440779610195, \"75%\": 0.176911544227886, \"max\": 1.0}, \" Accounts Receivable Turnover\": {\"count\": 6819.0, \"mean\": 12789705.237553563, \"std\": 278259836.9840667, \"min\": 0.0, \"25%\": 0.0007101336065656, \"50%\": 0.0009678106580909, \"75%\": 0.0014547594168788, \"max\": 9740000000.0}, \" Average Collection Days\": {\"count\": 6819.0, \"mean\": 9826220.861191595, \"std\": 256358895.70533204, \"min\": 0.0, \"25%\": 0.0043865304397204, \"50%\": 0.0065725374332349, \"75%\": 0.00897287558119175, \"max\": 9730000000.0}, \" Inventory Turnover Rate (times)\": {\"count\": 6819.0, \"mean\": 2149106056.607619, \"std\": 3247967014.047812, \"min\": 0.0, \"25%\": 0.0001728255554827, \"50%\": 0.0007646742653862, \"75%\": 4620000000.0, \"max\": 9990000000.0}, \" Fixed Assets Turnover Frequency\": {\"count\": 6819.0, \"mean\": 1008595981.8175156, \"std\": 2477557316.9201517, \"min\": 0.0, \"25%\": 0.0002330013064716, \"50%\": 0.000593094234655, \"75%\": 
0.0036523711287173, \"max\": 9990000000.0}, \" Net Worth Turnover Rate (times)\": {\"count\": 6819.0, \"mean\": 0.038595054614951586, \"std\": 0.036680343560413615, \"min\": 0.0, \"25%\": 0.0217741935483871, \"50%\": 0.0295161290322581, \"75%\": 0.0429032258064516, \"max\": 1.0}, \" Revenue per person\": {\"count\": 6819.0, \"mean\": 2325854.266358276, \"std\": 136632654.3899363, \"min\": 0.0, \"25%\": 0.010432854016421151, \"50%\": 0.0186155134174464, \"75%\": 0.0358547655068079, \"max\": 8810000000.0}, \" Operating profit per person\": {\"count\": 6819.0, \"mean\": 0.40067101508133507, \"std\": 0.032720144194699534, \"min\": 0.0, \"25%\": 0.392437981954275, \"50%\": 0.395897876574478, \"75%\": 0.40185093055335697, \"max\": 1.0}, \" Allocation rate per person\": {\"count\": 6819.0, \"mean\": 11255785.321742088, \"std\": 294506294.11677057, \"min\": 0.0, \"25%\": 0.004120528997963601, \"50%\": 0.0078443733586557, \"75%\": 0.015020308976719, \"max\": 9570000000.0}, \" Working Capital to Total Assets\": {\"count\": 6819.0, \"mean\": 0.814125170261333, \"std\": 0.0590544026482635, \"min\": 0.0, \"25%\": 0.774308962762401, \"50%\": 0.81027522898466, \"75%\": 0.8503828485419616, \"max\": 1.0}, \" Quick Assets/Total Assets\": {\"count\": 6819.0, \"mean\": 0.4001318123650569, \"std\": 0.20199806668068215, \"min\": 0.0, \"25%\": 0.24197285659394002, \"50%\": 0.386450924981744, \"75%\": 0.540593673285078, \"max\": 1.0}, \" Current Assets/Total Assets\": {\"count\": 6819.0, \"mean\": 0.5222734467680338, \"std\": 0.21811182151419323, \"min\": 0.0, \"25%\": 0.35284541721511353, \"50%\": 0.514829793890847, \"75%\": 0.6890506806831516, \"max\": 1.0}, \" Cash/Total Assets\": {\"count\": 6819.0, \"mean\": 0.12409456048965214, \"std\": 0.13925058358332645, \"min\": 0.0, \"25%\": 0.03354322123979425, \"50%\": 0.0748874639354301, \"75%\": 0.1610731518633315, \"max\": 1.0}, \" Quick Assets/Current Liability\": {\"count\": 6819.0, \"mean\": 3592902.1968296515, \"std\": 171620908.60682163, \"min\": 0.0, \"25%\": 0.00523977582664085, \"50%\": 0.0079088979804512, \"75%\": 0.0129509103075746, \"max\": 8820000000.0}, \" Cash/Current Liability\": {\"count\": 6819.0, \"mean\": 37159994.147133335, \"std\": 510350903.16273063, \"min\": 0.0, \"25%\": 0.0019730075415488497, \"50%\": 0.0049038864700734, \"75%\": 0.0128055731079178, \"max\": 9650000000.0}, \" Current Liability to Assets\": {\"count\": 6819.0, \"mean\": 0.0906727945676238, \"std\": 0.05028985666891821, \"min\": 0.0, \"25%\": 0.0533012764320206, \"50%\": 0.0827047949822228, \"75%\": 0.1195229934695275, \"max\": 1.0}, \" Operating Funds to Liability\": {\"count\": 6819.0, \"mean\": 0.35382800412158655, \"std\": 0.035147184179188065, \"min\": 0.0, \"25%\": 0.34102297735578047, \"50%\": 0.348596657106137, \"75%\": 0.3609148870133705, \"max\": 1.0}, \" Inventory/Working Capital\": {\"count\": 6819.0, \"mean\": 0.27739510610233165, \"std\": 0.010468846972945228, \"min\": 0.0, \"25%\": 0.2770339694810945, \"50%\": 0.277177699032242, \"75%\": 0.2774287054274715, \"max\": 1.0}, \" Inventory/Current Liability\": {\"count\": 6819.0, \"mean\": 55806804.52577958, \"std\": 582051554.6194191, \"min\": 0.0, \"25%\": 0.0031631476746991002, \"50%\": 0.0064973353534734, \"75%\": 0.011146766748190151, \"max\": 9910000000.0}, \" Current Liabilities/Liability\": {\"count\": 6819.0, \"mean\": 0.7615988775853332, \"std\": 0.20667676768344223, \"min\": 0.0, \"25%\": 0.6269807662218725, \"50%\": 0.806881404713333, \"75%\": 0.942026693700069, \"max\": 1.0}, \" Working 
Capital/Equity\": {\"count\": 6819.0, \"mean\": 0.7358165257322183, \"std\": 0.011678026475599061, \"min\": 0.0, \"25%\": 0.733611818564342, \"50%\": 0.736012732265696, \"75%\": 0.738559910578823, \"max\": 1.0}, \" Current Liabilities/Equity\": {\"count\": 6819.0, \"mean\": 0.33140980061698827, \"std\": 0.013488027908897866, \"min\": 0.0, \"25%\": 0.328095841686878, \"50%\": 0.329685133135929, \"75%\": 0.332322404809702, \"max\": 1.0}, \" Long-term Liability to Current Assets\": {\"count\": 6819.0, \"mean\": 54160038.13589435, \"std\": 570270621.9592104, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0019746187761809, \"75%\": 0.009005945944256601, \"max\": 9540000000.0}, \" Retained Earnings to Total Assets\": {\"count\": 6819.0, \"mean\": 0.9347327541270043, \"std\": 0.025564221690643103, \"min\": 0.0, \"25%\": 0.9310965081459854, \"50%\": 0.937672322031461, \"75%\": 0.9448112860939986, \"max\": 1.0}, \" Total income/Total expense\": {\"count\": 6819.0, \"mean\": 0.002548945567386416, \"std\": 0.01209281469621801, \"min\": 0.0, \"25%\": 0.0022355962096577498, \"50%\": 0.0023361709310448, \"75%\": 0.0024918511193838, \"max\": 1.0}, \" Total expense/Assets\": {\"count\": 6819.0, \"mean\": 0.02918409925586063, \"std\": 0.02714877679286165, \"min\": 0.0, \"25%\": 0.01456705658927065, \"50%\": 0.0226739487842648, \"75%\": 0.035930137895265155, \"max\": 1.0}, \" Current Asset Turnover Rate\": {\"count\": 6819.0, \"mean\": 1195855763.3089354, \"std\": 2821161238.262308, \"min\": 0.0, \"25%\": 0.00014562362973865, \"50%\": 0.0001987815566631, \"75%\": 0.0004525945407579, \"max\": 10000000000.0}, \" Quick Asset Turnover Rate\": {\"count\": 6819.0, \"mean\": 2163735272.034426, \"std\": 3374944402.166023, \"min\": 0.0, \"25%\": 0.00014171486236355001, \"50%\": 0.0002247727878357, \"75%\": 4900000000.0, \"max\": 10000000000.0}, \" Working capitcal Turnover Rate\": {\"count\": 6819.0, \"mean\": 0.5940062655659162, \"std\": 0.008959384178922204, \"min\": 0.0, \"25%\": 0.5939344215587965, \"50%\": 0.593962767104877, \"75%\": 0.5940023454696105, \"max\": 1.0}, \" Cash Turnover Rate\": {\"count\": 6819.0, \"mean\": 2471976967.444318, \"std\": 2938623226.6788445, \"min\": 0.0, \"25%\": 0.0002735337396781, \"50%\": 1080000000.0, \"75%\": 4510000000.0, \"max\": 10000000000.0}, \" Cash Flow to Sales\": {\"count\": 6819.0, \"mean\": 0.6715307810992098, \"std\": 0.0093413456183006, \"min\": 0.0, \"25%\": 0.671565259253275, \"50%\": 0.671573958092574, \"75%\": 0.671586580417158, \"max\": 1.0}, \" Fixed Assets to Assets\": {\"count\": 6819.0, \"mean\": 1220120.5015895537, \"std\": 100754158.71316808, \"min\": 0.0, \"25%\": 0.0853603651897917, \"50%\": 0.196881048224411, \"75%\": 0.3721999782647555, \"max\": 8320000000.0}, \" Current Liability to Liability\": {\"count\": 6819.0, \"mean\": 0.7615988775853332, \"std\": 0.20667676768344223, \"min\": 0.0, \"25%\": 0.6269807662218725, \"50%\": 0.806881404713333, \"75%\": 0.942026693700069, \"max\": 1.0}, \" Current Liability to Equity\": {\"count\": 6819.0, \"mean\": 0.33140980061698827, \"std\": 0.013488027908897866, \"min\": 0.0, \"25%\": 0.328095841686878, \"50%\": 0.329685133135929, \"75%\": 0.332322404809702, \"max\": 1.0}, \" Equity to Long-term Liability\": {\"count\": 6819.0, \"mean\": 0.11564465149636942, \"std\": 0.019529176275314197, \"min\": 0.0, \"25%\": 0.110933233663468, \"50%\": 0.112340004024972, \"75%\": 0.117106091075626, \"max\": 1.0}, \" Cash Flow to Total Assets\": {\"count\": 6819.0, \"mean\": 0.6497305901792364, \"std\": 0.04737213191450496, \"min\": 
0.0, \"25%\": 0.633265319013864, \"50%\": 0.645366460270721, \"75%\": 0.6630618534616091, \"max\": 1.0}, \" Cash Flow to Liability\": {\"count\": 6819.0, \"mean\": 0.4618492532922571, \"std\": 0.029942680345244794, \"min\": 0.0, \"25%\": 0.4571164765642225, \"50%\": 0.459750137932885, \"75%\": 0.46423584697152853, \"max\": 1.0}, \" CFO to Assets\": {\"count\": 6819.0, \"mean\": 0.5934150861096208, \"std\": 0.05856055014224858, \"min\": 0.0, \"25%\": 0.5659869401753586, \"50%\": 0.593266274083544, \"75%\": 0.6247688757833555, \"max\": 1.0}, \" Cash Flow to Equity\": {\"count\": 6819.0, \"mean\": 0.3155823898995751, \"std\": 0.01296089240164725, \"min\": 0.0, \"25%\": 0.312994699600273, \"50%\": 0.314952752072916, \"75%\": 0.317707188742567, \"max\": 1.0}, \" Current Liability to Current Assets\": {\"count\": 6819.0, \"mean\": 0.031506365747440736, \"std\": 0.030844688453563848, \"min\": 0.0, \"25%\": 0.018033665707965, \"50%\": 0.0275971428517009, \"75%\": 0.0383746158541899, \"max\": 1.0}, \" Liability-Assets Flag\": {\"count\": 6819.0, \"mean\": 0.001173192550227306, \"std\": 0.034234310865302146, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \" Net Income to Total Assets\": {\"count\": 6819.0, \"mean\": 0.8077602200365486, \"std\": 0.040332191531426226, \"min\": 0.0, \"25%\": 0.7967498491931705, \"50%\": 0.810619042075101, \"75%\": 0.8264545295408715, \"max\": 1.0}, \" Total assets to GNP price\": {\"count\": 6819.0, \"mean\": 18629417.81183602, \"std\": 376450059.7458224, \"min\": 0.0, \"25%\": 0.0009036204813306, \"50%\": 0.0020852127088157, \"75%\": 0.0052697768568805, \"max\": 9820000000.0}, \" No-credit Interval\": {\"count\": 6819.0, \"mean\": 0.623914574767534, \"std\": 0.012289548007412282, \"min\": 0.0, \"25%\": 0.623636304973909, \"50%\": 0.623879225987712, \"75%\": 0.6241681927893561, \"max\": 1.0}, \" Gross Profit to Sales\": {\"count\": 6819.0, \"mean\": 0.607946340270717, \"std\": 0.016933807795673647, \"min\": 0.0, \"25%\": 0.6004428952063054, \"50%\": 0.605998288167218, \"75%\": 0.613913271038147, \"max\": 1.0}, \" Net Income to Stockholder's Equity\": {\"count\": 6819.0, \"mean\": 0.8404020646301005, \"std\": 0.01452252608252491, \"min\": 0.0, \"25%\": 0.8401148040637195, \"50%\": 0.841178760250192, \"75%\": 0.8423569700412374, \"max\": 1.0}, \" Liability to Equity\": {\"count\": 6819.0, \"mean\": 0.2803651538333931, \"std\": 0.014463223575594045, \"min\": 0.0, \"25%\": 0.276944242646329, \"50%\": 0.278777583629637, \"75%\": 0.2814491856088265, \"max\": 1.0}, \" Degree of Financial Leverage (DFL)\": {\"count\": 6819.0, \"mean\": 0.027541119421203627, \"std\": 0.01566794186642967, \"min\": 0.0, \"25%\": 0.0267911566924924, \"50%\": 0.0268081258982465, \"75%\": 0.026913184214613348, \"max\": 1.0}, \" Interest Coverage Ratio (Interest expense to EBIT)\": {\"count\": 6819.0, \"mean\": 0.5653579335465574, \"std\": 0.013214239761961918, \"min\": 0.0, \"25%\": 0.565158395757604, \"50%\": 0.565251928758969, \"75%\": 0.565724709506105, \"max\": 1.0}, \" Net Income Flag\": {\"count\": 6819.0, \"mean\": 1.0, \"std\": 0.0, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \" Equity to Liability\": {\"count\": 6819.0, \"mean\": 0.047578356529497656, \"std\": 0.05001371618013796, \"min\": 0.0, \"25%\": 0.024476693570910098, \"50%\": 0.0337976972031022, \"75%\": 0.052837817459331596, \"max\": 1.0}}", "examples": "{\"Bankrupt?\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\" ROA(C) before interest and depreciation before 
interest\":{\"0\":0.3705942573,\"1\":0.4642909375,\"2\":0.4260712719,\"3\":0.3998440014},\" ROA(A) before interest and % after tax\":{\"0\":0.4243894461,\"1\":0.53821413,\"2\":0.4990187527,\"3\":0.4512647187},\" ROA(B) before interest and depreciation after tax\":{\"0\":0.4057497725,\"1\":0.5167300177,\"2\":0.4722950907,\"3\":0.4577332834},\" Operating Gross Margin\":{\"0\":0.6014572133,\"1\":0.6102350855,\"2\":0.6014500065,\"3\":0.5835411292},\" Realized Sales Gross Margin\":{\"0\":0.6014572133,\"1\":0.6102350855,\"2\":0.601363525,\"3\":0.5835411292},\" Operating Profit Rate\":{\"0\":0.9989692032,\"1\":0.9989459782,\"2\":0.9988573535,\"3\":0.9986997471},\" Pre-tax net Interest Rate\":{\"0\":0.7968871459,\"1\":0.7973801913,\"2\":0.7964033693,\"3\":0.7969669683},\" After-tax net Interest Rate\":{\"0\":0.8088093609,\"1\":0.8093007257,\"2\":0.8083875215,\"3\":0.8089655977},\" Non-industry income and expenditure\\/revenue\":{\"0\":0.3026464339,\"1\":0.3035564303,\"2\":0.3020351773,\"3\":0.303349536},\" Continuous interest rate (after tax)\":{\"0\":0.7809848502,\"1\":0.7815059743,\"2\":0.7802839362,\"3\":0.7812409912},\" Operating Expense Rate\":{\"0\":0.0001256969,\"1\":0.0002897851,\"2\":0.0002361297,\"3\":0.0001078888},\" Research and development expense rate\":{\"0\":0.0,\"1\":0.0,\"2\":25500000.0,\"3\":0.0},\" Cash flow rate\":{\"0\":0.4581431435,\"1\":0.4618672572,\"2\":0.4585205875,\"3\":0.4657054427},\" Interest-bearing debt interest rate\":{\"0\":0.0007250725,\"1\":0.0006470647,\"2\":0.000790079,\"3\":0.0004490449},\" Tax rate (A)\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\" Net Value Per Share (B)\":{\"0\":0.1479499389,\"1\":0.182251064,\"2\":0.1779107497,\"3\":0.1541865071},\" Net Value Per Share (A)\":{\"0\":0.1479499389,\"1\":0.182251064,\"2\":0.1779107497,\"3\":0.1541865071},\" Net Value Per Share (C)\":{\"0\":0.1479499389,\"1\":0.182251064,\"2\":0.193712865,\"3\":0.1541865071},\" Persistent EPS in the Last Four Seasons\":{\"0\":0.1691405881,\"1\":0.208943935,\"2\":0.1805805049,\"3\":0.1937222275},\" Cash Flow Per Share\":{\"0\":0.3116644267,\"1\":0.3181368041,\"2\":0.3071019311,\"3\":0.3216736224},\" Revenue Per Share (Yuan \\u00a5)\":{\"0\":0.0175597804,\"1\":0.021144335,\"2\":0.0059440083,\"3\":0.014368468},\" Operating Profit Per Share (Yuan \\u00a5)\":{\"0\":0.0959205276,\"1\":0.0937220096,\"2\":0.0923377575,\"3\":0.0777623972},\" Per Share Net profit before tax (Yuan \\u00a5)\":{\"0\":0.1387361603,\"1\":0.1699179031,\"2\":0.1428033441,\"3\":0.148602847},\" Realized Sales Gross Profit Growth Rate\":{\"0\":0.0221022784,\"1\":0.0220801699,\"2\":0.0227600968,\"3\":0.0220460669},\" Operating Profit Growth Rate\":{\"0\":0.8481949945,\"1\":0.8480878838,\"2\":0.8480940128,\"3\":0.8480054774},\" After-tax Net Profit Growth Rate\":{\"0\":0.6889794628,\"1\":0.6896929012,\"2\":0.689462677,\"3\":0.6891095356},\" Regular Net Profit Growth Rate\":{\"0\":0.6889794628,\"1\":0.6897017016,\"2\":0.6894696596,\"3\":0.6891095356},\" Continuous Net Profit Growth Rate\":{\"0\":0.2175353862,\"1\":0.2176195965,\"2\":0.217601299,\"3\":0.2175681883},\" Total Asset Growth Rate\":{\"0\":4980000000.0,\"1\":6110000000.0,\"2\":7280000000.0,\"3\":4880000000.0},\" Net Value Growth Rate\":{\"0\":0.0003269773,\"1\":0.0004430401,\"2\":0.0003964253,\"3\":0.0003824259},\" Total Asset Return Growth Rate Ratio\":{\"0\":0.2630999837,\"1\":0.2645157781,\"2\":0.2641839756,\"3\":0.2633711759},\" Cash Reinvestment %\":{\"0\":0.363725271,\"1\":0.376709139,\"2\":0.3689132298,\"3\":0.3840765992},\" Current 
Ratio\":{\"0\":0.0022589633,\"1\":0.0060162059,\"2\":0.0115425537,\"3\":0.0041940587},\" Quick Ratio\":{\"0\":0.0012077551,\"1\":0.0040393668,\"2\":0.0053475602,\"3\":0.0028964911},\" Interest Expense Ratio\":{\"0\":0.629951302,\"1\":0.6351724634,\"2\":0.6296314434,\"3\":0.630228353},\" Total debt\\/Total net worth\":{\"0\":0.0212659244,\"1\":0.0125023938,\"2\":0.021247686,\"3\":0.0095724017},\" Debt ratio %\":{\"0\":0.2075762615,\"1\":0.1711763461,\"2\":0.2075157965,\"3\":0.151464764},\" Net worth\\/Assets\":{\"0\":0.7924237385,\"1\":0.8288236539,\"2\":0.7924842035,\"3\":0.848535236},\" Long-term fund suitability ratio (A)\":{\"0\":0.0050244547,\"1\":0.0050588818,\"2\":0.0050998994,\"3\":0.0050469241},\" Borrowing dependency\":{\"0\":0.3902843544,\"1\":0.37676002,\"2\":0.3790929201,\"3\":0.3797426876},\" Contingent liabilities\\/Net worth\":{\"0\":0.0064785025,\"1\":0.0058350395,\"2\":0.0065619821,\"3\":0.0053658477},\" Operating profit\\/Paid-in capital\":{\"0\":0.095884834,\"1\":0.0937433843,\"2\":0.0923184653,\"3\":0.0777272949},\" Net profit before tax\\/Paid-in capital\":{\"0\":0.1377573335,\"1\":0.1689616168,\"2\":0.1480355931,\"3\":0.1475605158},\" Inventory and accounts receivable\\/Net value\":{\"0\":0.3980356983,\"1\":0.3977248825,\"2\":0.406580451,\"3\":0.3979245013},\" Total Asset Turnover\":{\"0\":0.0869565217,\"1\":0.0644677661,\"2\":0.0149925037,\"3\":0.0899550225},\" Accounts Receivable Turnover\":{\"0\":0.0018138841,\"1\":0.0012863563,\"2\":0.0014953385,\"3\":0.0019660556},\" Average Collection Days\":{\"0\":0.0034873643,\"1\":0.0049168079,\"2\":0.0042268495,\"3\":0.0032149673},\" Inventory Turnover Rate (times)\":{\"0\":0.0001820926,\"1\":9360000000.0,\"2\":65000000.0,\"3\":7130000000.0},\" Fixed Assets Turnover Frequency\":{\"0\":0.0001165007,\"1\":719000000.0,\"2\":2650000000.0,\"3\":9150000000.0},\" Net Worth Turnover Rate (times)\":{\"0\":0.0329032258,\"1\":0.025483871,\"2\":0.0133870968,\"3\":0.0280645161},\" Revenue per person\":{\"0\":0.034164182,\"1\":0.0068886506,\"2\":0.0289969596,\"3\":0.0154634784},\" Operating profit per person\":{\"0\":0.3929128695,\"1\":0.3915899686,\"2\":0.3819678433,\"3\":0.3784966419},\" Allocation rate per person\":{\"0\":0.0371353016,\"1\":0.0123349721,\"2\":0.1410163119,\"3\":0.0213199897},\" Working Capital to Total Assets\":{\"0\":0.6727752925,\"1\":0.751110917,\"2\":0.8295019149,\"3\":0.7257541797},\" Quick Assets\\/Total Assets\":{\"0\":0.1666729588,\"1\":0.1272360023,\"2\":0.3402008785,\"3\":0.1615745316},\" Current Assets\\/Total Assets\":{\"0\":0.1906429591,\"1\":0.1824190541,\"2\":0.6028057017,\"3\":0.2258148689},\" Cash\\/Total Assets\":{\"0\":0.004094406,\"1\":0.014947727,\"2\":0.0009909445,\"3\":0.0188506248},\" Quick Assets\\/Current Liability\":{\"0\":0.0019967709,\"1\":0.0041360298,\"2\":0.0063024814,\"3\":0.0029612377},\" Cash\\/Current Liability\":{\"0\":0.000147336,\"1\":0.0013839101,\"2\":5340000000.0,\"3\":0.0010106464},\" Current Liability to Assets\":{\"0\":0.1473084504,\"1\":0.0569628274,\"2\":0.0981620645,\"3\":0.0987146304},\" Operating Funds to Liability\":{\"0\":0.3340151713,\"1\":0.341105992,\"2\":0.3367314947,\"3\":0.348716439},\" Inventory\\/Working Capital\":{\"0\":0.2769201582,\"1\":0.2896415764,\"2\":0.2774555281,\"3\":0.2765803042},\" Inventory\\/Current Liability\":{\"0\":0.00103599,\"1\":0.0052096824,\"2\":0.0138787858,\"3\":0.0035401479},\" Current Liabilities\\/Liability\":{\"0\":0.6762691762,\"1\":0.308588593,\"2\":0.4460274872,\"3\":0.6158483686},\" Working 
Capital\\/Equity\":{\"0\":0.7212745515,\"1\":0.7319752885,\"2\":0.7427286376,\"3\":0.7298249087},\" Current Liabilities\\/Equity\":{\"0\":0.3390770068,\"1\":0.3297401479,\"2\":0.3347768513,\"3\":0.3315089787},\" Long-term Liability to Current Assets\":{\"0\":0.025592368,\"1\":0.0239468187,\"2\":0.0037151157,\"3\":0.0221651997},\" Retained Earnings to Total Assets\":{\"0\":0.9032247712,\"1\":0.9310652176,\"2\":0.9099033625,\"3\":0.9069021588},\" Total income\\/Total expense\":{\"0\":0.002021613,\"1\":0.0022256083,\"2\":0.0020600706,\"3\":0.0018313586},\" Total expense\\/Assets\":{\"0\":0.0648557077,\"1\":0.02551586,\"2\":0.0213874282,\"3\":0.0241610702},\" Current Asset Turnover Rate\":{\"0\":701000000.0,\"1\":0.0001065198,\"2\":0.0017910937,\"3\":8140000000.0},\" Quick Asset Turnover Rate\":{\"0\":6550000000.0,\"1\":7700000000.0,\"2\":0.0010226765,\"3\":6050000000.0},\" Working capitcal Turnover Rate\":{\"0\":0.593830504,\"1\":0.5939155479,\"2\":0.5945018513,\"3\":0.5938887926},\" Cash Turnover Rate\":{\"0\":458000000.0,\"1\":2490000000.0,\"2\":761000000.0,\"3\":2030000000.0},\" Cash Flow to Sales\":{\"0\":0.6715676536,\"1\":0.6715699423,\"2\":0.6715713218,\"3\":0.6715191702},\" Fixed Assets to Assets\":{\"0\":0.4242057622,\"1\":0.4688281283,\"2\":0.2761792222,\"3\":0.5591439633},\" Current Liability to Liability\":{\"0\":0.6762691762,\"1\":0.308588593,\"2\":0.4460274872,\"3\":0.6158483686},\" Current Liability to Equity\":{\"0\":0.3390770068,\"1\":0.3297401479,\"2\":0.3347768513,\"3\":0.3315089787},\" Equity to Long-term Liability\":{\"0\":0.1265494878,\"1\":0.1209161058,\"2\":0.1179223194,\"3\":0.1207604553},\" Cash Flow to Total Assets\":{\"0\":0.6375553953,\"1\":0.6410999847,\"2\":0.6427645502,\"3\":0.5790393123},\" Cash Flow to Liability\":{\"0\":0.4586091477,\"1\":0.4590010533,\"2\":0.4592540355,\"3\":0.4485179116},\" CFO to Assets\":{\"0\":0.5203819179,\"1\":0.5671013087,\"2\":0.5384905396,\"3\":0.6041050562},\" Cash Flow to Equity\":{\"0\":0.3129049481,\"1\":0.3141631352,\"2\":0.3145154263,\"3\":0.3023822548},\" Current Liability to Current Assets\":{\"0\":0.1182504766,\"1\":0.0477752816,\"2\":0.0253464891,\"3\":0.0672496173},\" Liability-Assets Flag\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\" Net Income to Total Assets\":{\"0\":0.7168453432,\"1\":0.795297136,\"2\":0.774669697,\"3\":0.7395545252},\" Total assets to GNP price\":{\"0\":0.00921944,\"1\":0.0083233018,\"2\":0.0400028529,\"3\":0.0032524753},\" No-credit Interval\":{\"0\":0.6228789594,\"1\":0.6236517417,\"2\":0.6238410376,\"3\":0.6229287091},\" Gross Profit to Sales\":{\"0\":0.6014532901,\"1\":0.6102365259,\"2\":0.6014493405,\"3\":0.5835376122},\" Net Income to Stockholder's Equity\":{\"0\":0.827890214,\"1\":0.839969268,\"2\":0.8367743086,\"3\":0.8346971068},\" Liability to Equity\":{\"0\":0.2902018928,\"1\":0.2838459798,\"2\":0.2901885329,\"3\":0.281721193},\" Degree of Financial Leverage (DFL)\":{\"0\":0.0266006308,\"1\":0.2645768198,\"2\":0.0265547199,\"3\":0.0266966344},\" Interest Coverage Ratio (Interest expense to EBIT)\":{\"0\":0.5640501123,\"1\":0.5701749464,\"2\":0.5637060765,\"3\":0.5646634203},\" Net Income Flag\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\" Equity to Liability\":{\"0\":0.0164687409,\"1\":0.0207943063,\"2\":0.0164741143,\"3\":0.0239823322}}"}}]
true
1
<start_data_description><data_path>company-bankruptcy-prediction/data.csv: <column_names> ['Bankrupt?', ' ROA(C) before interest and depreciation before interest', ' ROA(A) before interest and % after tax', ' ROA(B) before interest and depreciation after tax', ' Operating Gross Margin', ' Realized Sales Gross Margin', ' Operating Profit Rate', ' Pre-tax net Interest Rate', ' After-tax net Interest Rate', ' Non-industry income and expenditure/revenue', ' Continuous interest rate (after tax)', ' Operating Expense Rate', ' Research and development expense rate', ' Cash flow rate', ' Interest-bearing debt interest rate', ' Tax rate (A)', ' Net Value Per Share (B)', ' Net Value Per Share (A)', ' Net Value Per Share (C)', ' Persistent EPS in the Last Four Seasons', ' Cash Flow Per Share', ' Revenue Per Share (Yuan ¥)', ' Operating Profit Per Share (Yuan ¥)', ' Per Share Net profit before tax (Yuan ¥)', ' Realized Sales Gross Profit Growth Rate', ' Operating Profit Growth Rate', ' After-tax Net Profit Growth Rate', ' Regular Net Profit Growth Rate', ' Continuous Net Profit Growth Rate', ' Total Asset Growth Rate', ' Net Value Growth Rate', ' Total Asset Return Growth Rate Ratio', ' Cash Reinvestment %', ' Current Ratio', ' Quick Ratio', ' Interest Expense Ratio', ' Total debt/Total net worth', ' Debt ratio %', ' Net worth/Assets', ' Long-term fund suitability ratio (A)', ' Borrowing dependency', ' Contingent liabilities/Net worth', ' Operating profit/Paid-in capital', ' Net profit before tax/Paid-in capital', ' Inventory and accounts receivable/Net value', ' Total Asset Turnover', ' Accounts Receivable Turnover', ' Average Collection Days', ' Inventory Turnover Rate (times)', ' Fixed Assets Turnover Frequency', ' Net Worth Turnover Rate (times)', ' Revenue per person', ' Operating profit per person', ' Allocation rate per person', ' Working Capital to Total Assets', ' Quick Assets/Total Assets', ' Current Assets/Total Assets', ' Cash/Total Assets', ' Quick Assets/Current Liability', ' Cash/Current Liability', ' Current Liability to Assets', ' Operating Funds to Liability', ' Inventory/Working Capital', ' Inventory/Current Liability', ' Current Liabilities/Liability', ' Working Capital/Equity', ' Current Liabilities/Equity', ' Long-term Liability to Current Assets', ' Retained Earnings to Total Assets', ' Total income/Total expense', ' Total expense/Assets', ' Current Asset Turnover Rate', ' Quick Asset Turnover Rate', ' Working capitcal Turnover Rate', ' Cash Turnover Rate', ' Cash Flow to Sales', ' Fixed Assets to Assets', ' Current Liability to Liability', ' Current Liability to Equity', ' Equity to Long-term Liability', ' Cash Flow to Total Assets', ' Cash Flow to Liability', ' CFO to Assets', ' Cash Flow to Equity', ' Current Liability to Current Assets', ' Liability-Assets Flag', ' Net Income to Total Assets', ' Total assets to GNP price', ' No-credit Interval', ' Gross Profit to Sales', " Net Income to Stockholder's Equity", ' Liability to Equity', ' Degree of Financial Leverage (DFL)', ' Interest Coverage Ratio (Interest expense to EBIT)', ' Net Income Flag', ' Equity to Liability'] <column_types> {'Bankrupt?': 'int64', ' ROA(C) before interest and depreciation before interest': 'float64', ' ROA(A) before interest and % after tax': 'float64', ' ROA(B) before interest and depreciation after tax': 'float64', ' Operating Gross Margin': 'float64', ' Realized Sales Gross Margin': 'float64', ' Operating Profit Rate': 'float64', ' Pre-tax net Interest Rate': 'float64', ' After-tax net Interest 
Rate': 'float64', ' Non-industry income and expenditure/revenue': 'float64', ' Continuous interest rate (after tax)': 'float64', ' Operating Expense Rate': 'float64', ' Research and development expense rate': 'float64', ' Cash flow rate': 'float64', ' Interest-bearing debt interest rate': 'float64', ' Tax rate (A)': 'float64', ' Net Value Per Share (B)': 'float64', ' Net Value Per Share (A)': 'float64', ' Net Value Per Share (C)': 'float64', ' Persistent EPS in the Last Four Seasons': 'float64', ' Cash Flow Per Share': 'float64', ' Revenue Per Share (Yuan ¥)': 'float64', ' Operating Profit Per Share (Yuan ¥)': 'float64', ' Per Share Net profit before tax (Yuan ¥)': 'float64', ' Realized Sales Gross Profit Growth Rate': 'float64', ' Operating Profit Growth Rate': 'float64', ' After-tax Net Profit Growth Rate': 'float64', ' Regular Net Profit Growth Rate': 'float64', ' Continuous Net Profit Growth Rate': 'float64', ' Total Asset Growth Rate': 'float64', ' Net Value Growth Rate': 'float64', ' Total Asset Return Growth Rate Ratio': 'float64', ' Cash Reinvestment %': 'float64', ' Current Ratio': 'float64', ' Quick Ratio': 'float64', ' Interest Expense Ratio': 'float64', ' Total debt/Total net worth': 'float64', ' Debt ratio %': 'float64', ' Net worth/Assets': 'float64', ' Long-term fund suitability ratio (A)': 'float64', ' Borrowing dependency': 'float64', ' Contingent liabilities/Net worth': 'float64', ' Operating profit/Paid-in capital': 'float64', ' Net profit before tax/Paid-in capital': 'float64', ' Inventory and accounts receivable/Net value': 'float64', ' Total Asset Turnover': 'float64', ' Accounts Receivable Turnover': 'float64', ' Average Collection Days': 'float64', ' Inventory Turnover Rate (times)': 'float64', ' Fixed Assets Turnover Frequency': 'float64', ' Net Worth Turnover Rate (times)': 'float64', ' Revenue per person': 'float64', ' Operating profit per person': 'float64', ' Allocation rate per person': 'float64', ' Working Capital to Total Assets': 'float64', ' Quick Assets/Total Assets': 'float64', ' Current Assets/Total Assets': 'float64', ' Cash/Total Assets': 'float64', ' Quick Assets/Current Liability': 'float64', ' Cash/Current Liability': 'float64', ' Current Liability to Assets': 'float64', ' Operating Funds to Liability': 'float64', ' Inventory/Working Capital': 'float64', ' Inventory/Current Liability': 'float64', ' Current Liabilities/Liability': 'float64', ' Working Capital/Equity': 'float64', ' Current Liabilities/Equity': 'float64', ' Long-term Liability to Current Assets': 'float64', ' Retained Earnings to Total Assets': 'float64', ' Total income/Total expense': 'float64', ' Total expense/Assets': 'float64', ' Current Asset Turnover Rate': 'float64', ' Quick Asset Turnover Rate': 'float64', ' Working capitcal Turnover Rate': 'float64', ' Cash Turnover Rate': 'float64', ' Cash Flow to Sales': 'float64', ' Fixed Assets to Assets': 'float64', ' Current Liability to Liability': 'float64', ' Current Liability to Equity': 'float64', ' Equity to Long-term Liability': 'float64', ' Cash Flow to Total Assets': 'float64', ' Cash Flow to Liability': 'float64', ' CFO to Assets': 'float64', ' Cash Flow to Equity': 'float64', ' Current Liability to Current Assets': 'float64', ' Liability-Assets Flag': 'int64', ' Net Income to Total Assets': 'float64', ' Total assets to GNP price': 'float64', ' No-credit Interval': 'float64', ' Gross Profit to Sales': 'float64', " Net Income to Stockholder's Equity": 'float64', ' Liability to Equity': 'float64', ' Degree of Financial Leverage 
(DFL)': 'float64', ' Interest Coverage Ratio (Interest expense to EBIT)': 'float64', ' Net Income Flag': 'int64', ' Equity to Liability': 'float64'} <dataframe_Summary> {'Bankrupt?': {'count': 6819.0, 'mean': 0.03226279513125092, 'std': 0.17671017660774022, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, ' ROA(C) before interest and depreciation before interest': {'count': 6819.0, 'mean': 0.5051796332417815, 'std': 0.06068563875428444, 'min': 0.0, '25%': 0.476527080388047, '50%': 0.502705601325988, '75%': 0.535562813825379, 'max': 1.0}, ' ROA(A) before interest and % after tax': {'count': 6819.0, 'mean': 0.5586249158750463, 'std': 0.06562003103170726, 'min': 0.0, '25%': 0.53554295682512, '50%': 0.559801569995639, '75%': 0.58915721761884, 'max': 1.0}, ' ROA(B) before interest and depreciation after tax': {'count': 6819.0, 'mean': 0.5535887093516657, 'std': 0.06159480929187566, 'min': 0.0, '25%': 0.527276620804112, '50%': 0.552277959205525, '75%': 0.584105144815033, 'max': 1.0}, ' Operating Gross Margin': {'count': 6819.0, 'mean': 0.6079480383703836, 'std': 0.01693381254822146, 'min': 0.0, '25%': 0.6004446590466855, '50%': 0.605997492036495, '75%': 0.613914152697502, 'max': 1.0}, ' Realized Sales Gross Margin': {'count': 6819.0, 'mean': 0.6079294691769791, 'std': 0.01691607005567578, 'min': 0.0, '25%': 0.600433848859165, '50%': 0.605975871661454, '75%': 0.6138420847806975, 'max': 1.0}, ' Operating Profit Rate': {'count': 6819.0, 'mean': 0.9987551277900442, 'std': 0.013010025092984125, 'min': 0.0, '25%': 0.998969203197885, '50%': 0.999022239374566, '75%': 0.999094514164357, 'max': 1.0}, ' Pre-tax net Interest Rate': {'count': 6819.0, 'mean': 0.7971897524712905, 'std': 0.012868988419884597, 'min': 0.0, '25%': 0.797385863236893, '50%': 0.797463610578231, '75%': 0.797578848185589, 'max': 1.0}, ' After-tax net Interest Rate': {'count': 6819.0, 'mean': 0.8090835935135348, 'std': 0.013600653945149043, 'min': 0.0, '25%': 0.809311597146491, '50%': 0.809375198550956, '75%': 0.809469266134837, 'max': 1.0}, ' Non-industry income and expenditure/revenue': {'count': 6819.0, 'mean': 0.303622923649734, 'std': 0.011163439838128548, 'min': 0.0, '25%': 0.30346627659685, '50%': 0.303525492830123, '75%': 0.303585192461218, 'max': 1.0}, ' Continuous interest rate (after tax)': {'count': 6819.0, 'mean': 0.7813814325261418, 'std': 0.012679004028913246, 'min': 0.0, '25%': 0.7815668165898519, '50%': 0.781634957112874, '75%': 0.7817353784192015, 'max': 1.0}, ' Operating Expense Rate': {'count': 6819.0, 'mean': 1995347312.8028853, 'std': 3237683890.5223837, 'min': 0.0, '25%': 0.0001566874492428, '50%': 0.0002777588583625, '75%': 4145000000.0, 'max': 9990000000.0}, ' Research and development expense rate': {'count': 6819.0, 'mean': 1950427306.0568295, 'std': 2598291553.9983206, 'min': 0.0, '25%': 0.000128187953762, '50%': 509000000.0, '75%': 3450000000.0, 'max': 9980000000.0}, ' Cash flow rate': {'count': 6819.0, 'mean': 0.4674311857796612, 'std': 0.017035517308785362, 'min': 0.0, '25%': 0.4615577531181065, '50%': 0.465079724549793, '75%': 0.471003917029432, 'max': 1.0}, ' Interest-bearing debt interest rate': {'count': 6819.0, 'mean': 16448012.905942537, 'std': 108275033.532824, 'min': 0.0, '25%': 0.0002030203020302, '50%': 0.0003210321032103, '75%': 0.0005325532553255, 'max': 990000000.0}, ' Tax rate (A)': {'count': 6819.0, 'mean': 0.11500074794142427, 'std': 0.13866749672835132, 'min': 0.0, '25%': 0.0, '50%': 0.0734892195566353, '75%': 0.205840672132807, 'max': 1.0}, ' Net Value Per Share (B)': {'count': 
6819.0, 'mean': 0.19066057949747392, 'std': 0.033389768351330985, 'min': 0.0, '25%': 0.173612574269942, '50%': 0.184400151700308, '75%': 0.199570182461759, 'max': 1.0}, ' Net Value Per Share (A)': {'count': 6819.0, 'mean': 0.19063317896774643, 'std': 0.033473514172429, 'min': 0.0, '25%': 0.173612574269942, '50%': 0.184400151700308, '75%': 0.199570182461759, 'max': 1.0}, ' Net Value Per Share (C)': {'count': 6819.0, 'mean': 0.1906723702531618, 'std': 0.03348013767040907, 'min': 0.0, '25%': 0.1736757827314485, '50%': 0.184400151700308, '75%': 0.199612321436096, 'max': 1.0}, ' Persistent EPS in the Last Four Seasons': {'count': 6819.0, 'mean': 0.22881285256452782, 'std': 0.03326261307597689, 'min': 0.0, '25%': 0.214711165736976, '50%': 0.22454382149948, '75%': 0.2388200813085, 'max': 1.0}, ' Cash Flow Per Share': {'count': 6819.0, 'mean': 0.32348191216983224, 'std': 0.01761091295834377, 'min': 0.0, '25%': 0.317747754120393, '50%': 0.322487090613284, '75%': 0.3286234703260945, 'max': 1.0}, ' Revenue Per Share (Yuan ¥)': {'count': 6819.0, 'mean': 1328640.6020960682, 'std': 51707089.76790663, 'min': 0.0, '25%': 0.01563138073415305, '50%': 0.0273757127516373, '75%': 0.0463572152396509, 'max': 3020000000.0}, ' Operating Profit Per Share (Yuan ¥)': {'count': 6819.0, 'mean': 0.10909073887546925, 'std': 0.027942244774416095, 'min': 0.0, '25%': 0.0960833808321798, '50%': 0.104226040224737, '75%': 0.1161550362348345, 'max': 1.0}, ' Per Share Net profit before tax (Yuan ¥)': {'count': 6819.0, 'mean': 0.18436057764203345, 'std': 0.03318020898090537, 'min': 0.0, '25%': 0.170369812457634, '50%': 0.179709271672818, '75%': 0.193492505837162, 'max': 1.0}, ' Realized Sales Gross Profit Growth Rate': {'count': 6819.0, 'mean': 0.02240785447416587, 'std': 0.012079270152911608, 'min': 0.0, '25%': 0.022064532735505453, '50%': 0.0221023731764072, '75%': 0.022153148426612798, 'max': 1.0}, ' Operating Profit Growth Rate': {'count': 6819.0, 'mean': 0.8479799951688084, 'std': 0.010752477405401351, 'min': 0.0, '25%': 0.8479841081819834, '50%': 0.848043533745768, '75%': 0.8481225403945605, 'max': 1.0}, ' After-tax Net Profit Growth Rate': {'count': 6819.0, 'mean': 0.6891461185681318, 'std': 0.013853022260934765, 'min': 0.0, '25%': 0.6892699337448115, '50%': 0.689438526343149, '75%': 0.6896471679790515, 'max': 1.0}, ' Regular Net Profit Growth Rate': {'count': 6819.0, 'mean': 0.6891500117795616, 'std': 0.0139102834140106, 'min': 0.0, '25%': 0.689270265563206, '50%': 0.689438555196922, '75%': 0.6896470092832976, 'max': 1.0}, ' Continuous Net Profit Growth Rate': {'count': 6819.0, 'mean': 0.21763901299696697, 'std': 0.01006296314611611, 'min': 0.0, '25%': 0.2175795122117655, '50%': 0.217598046961963, '75%': 0.217621501194243, 'max': 1.0}, ' Total Asset Growth Rate': {'count': 6819.0, 'mean': 5508096595.248749, 'std': 2897717771.1697035, 'min': 0.0, '25%': 4860000000.0, '50%': 6400000000.0, '75%': 7390000000.0, 'max': 9990000000.0}, ' Net Value Growth Rate': {'count': 6819.0, 'mean': 1566212.055241067, 'std': 114159389.51834548, 'min': 0.0, '25%': 0.0004409688868264, '50%': 0.0004619555222076, '75%': 0.000499362141038, 'max': 9330000000.0}, ' Total Asset Return Growth Rate Ratio': {'count': 6819.0, 'mean': 0.2642475118758414, 'std': 0.009634208862611605, 'min': 0.0, '25%': 0.263758926420651, '50%': 0.264049545034229, '75%': 0.264388341065032, 'max': 1.0}, ' Cash Reinvestment %': {'count': 6819.0, 'mean': 0.37967667232266245, 'std': 0.020736565809616806, 'min': 0.0, '25%': 0.37474851905666695, '50%': 0.380425468499683, '75%': 
0.386731120301032, 'max': 1.0}, ' Current Ratio': {'count': 6819.0, 'mean': 403284.954244977, 'std': 33302155.825480215, 'min': 0.0, '25%': 0.00755504663011965, '50%': 0.0105871744549939, '75%': 0.0162695280201934, 'max': 2750000000.0}, ' Quick Ratio': {'count': 6819.0, 'mean': 8376594.819684891, 'std': 244684748.44687235, 'min': 0.0, '25%': 0.004725903227376101, '50%': 0.0074124720675444, '75%': 0.01224910697241505, 'max': 9230000000.0}, ' Interest Expense Ratio': {'count': 6819.0, 'mean': 0.6309910117124214, 'std': 0.011238461504050156, 'min': 0.0, '25%': 0.63061225188696, '50%': 0.630698209613567, '75%': 0.631125258558102, 'max': 1.0}, ' Total debt/Total net worth': {'count': 6819.0, 'mean': 4416336.714259365, 'std': 168406905.28151134, 'min': 0.0, '25%': 0.0030070491250148, '50%': 0.005546284390702, '75%': 0.00927329266179695, 'max': 9940000000.0}, ' Debt ratio %': {'count': 6819.0, 'mean': 0.11317708497306007, 'std': 0.05392030606308283, 'min': 0.0, '25%': 0.0728905281615624, '50%': 0.111406717658796, '75%': 0.148804305106267, 'max': 1.0}, ' Net worth/Assets': {'count': 6819.0, 'mean': 0.8868229150269401, 'std': 0.05392030606308284, 'min': 0.0, '25%': 0.8511956948937329, '50%': 0.888593282341204, '75%': 0.927109471838438, 'max': 1.0}, ' Long-term fund suitability ratio (A)': {'count': 6819.0, 'mean': 0.00878273381503679, 'std': 0.028152926049290605, 'min': 0.0, '25%': 0.0052436836906082, '50%': 0.0056646361117639, '75%': 0.00684743246553585, 'max': 1.0}, ' Borrowing dependency': {'count': 6819.0, 'mean': 0.37465429459872324, 'std': 0.016286163355500444, 'min': 0.0, '25%': 0.3701678435547765, '50%': 0.372624322553083, '75%': 0.3762707372009225, 'max': 1.0}, ' Contingent liabilities/Net worth': {'count': 6819.0, 'mean': 0.0059682772664790325, 'std': 0.012188361875857312, 'min': 0.0, '25%': 0.0053658477137564, '50%': 0.0053658477137564, '75%': 0.00576435604952715, 'max': 1.0}, ' Operating profit/Paid-in capital': {'count': 6819.0, 'mean': 0.10897668140338518, 'std': 0.02778168598564047, 'min': 0.0, '25%': 0.0961046786197013, '50%': 0.104133079290635, '75%': 0.115927337274252, 'max': 1.0}, ' Net profit before tax/Paid-in capital': {'count': 6819.0, 'mean': 0.18271502907673604, 'std': 0.030784771508309793, 'min': 0.0, '25%': 0.169376366789835, '50%': 0.178455621747983, '75%': 0.191606967800317, 'max': 1.0}, ' Inventory and accounts receivable/Net value': {'count': 6819.0, 'mean': 0.40245933052066923, 'std': 0.013324079587932275, 'min': 0.0, '25%': 0.3974026791778925, '50%': 0.40013102490143, '75%': 0.404550770809581, 'max': 1.0}, ' Total Asset Turnover': {'count': 6819.0, 'mean': 0.14160561602172958, 'std': 0.1011449684929233, 'min': 0.0, '25%': 0.0764617691154423, '50%': 0.118440779610195, '75%': 0.176911544227886, 'max': 1.0}, ' Accounts Receivable Turnover': {'count': 6819.0, 'mean': 12789705.237553563, 'std': 278259836.9840667, 'min': 0.0, '25%': 0.0007101336065656, '50%': 0.0009678106580909, '75%': 0.0014547594168788, 'max': 9740000000.0}, ' Average Collection Days': {'count': 6819.0, 'mean': 9826220.861191595, 'std': 256358895.70533204, 'min': 0.0, '25%': 0.0043865304397204, '50%': 0.0065725374332349, '75%': 0.00897287558119175, 'max': 9730000000.0}, ' Inventory Turnover Rate (times)': {'count': 6819.0, 'mean': 2149106056.607619, 'std': 3247967014.047812, 'min': 0.0, '25%': 0.0001728255554827, '50%': 0.0007646742653862, '75%': 4620000000.0, 'max': 9990000000.0}, ' Fixed Assets Turnover Frequency': {'count': 6819.0, 'mean': 1008595981.8175156, 'std': 2477557316.9201517, 'min': 0.0, 
'25%': 0.0002330013064716, '50%': 0.000593094234655, '75%': 0.0036523711287173, 'max': 9990000000.0}, ' Net Worth Turnover Rate (times)': {'count': 6819.0, 'mean': 0.038595054614951586, 'std': 0.036680343560413615, 'min': 0.0, '25%': 0.0217741935483871, '50%': 0.0295161290322581, '75%': 0.0429032258064516, 'max': 1.0}, ' Revenue per person': {'count': 6819.0, 'mean': 2325854.266358276, 'std': 136632654.3899363, 'min': 0.0, '25%': 0.010432854016421151, '50%': 0.0186155134174464, '75%': 0.0358547655068079, 'max': 8810000000.0}, ' Operating profit per person': {'count': 6819.0, 'mean': 0.40067101508133507, 'std': 0.032720144194699534, 'min': 0.0, '25%': 0.392437981954275, '50%': 0.395897876574478, '75%': 0.40185093055335697, 'max': 1.0}, ' Allocation rate per person': {'count': 6819.0, 'mean': 11255785.321742088, 'std': 294506294.11677057, 'min': 0.0, '25%': 0.004120528997963601, '50%': 0.0078443733586557, '75%': 0.015020308976719, 'max': 9570000000.0}, ' Working Capital to Total Assets': {'count': 6819.0, 'mean': 0.814125170261333, 'std': 0.0590544026482635, 'min': 0.0, '25%': 0.774308962762401, '50%': 0.81027522898466, '75%': 0.8503828485419616, 'max': 1.0}, ' Quick Assets/Total Assets': {'count': 6819.0, 'mean': 0.4001318123650569, 'std': 0.20199806668068215, 'min': 0.0, '25%': 0.24197285659394002, '50%': 0.386450924981744, '75%': 0.540593673285078, 'max': 1.0}, ' Current Assets/Total Assets': {'count': 6819.0, 'mean': 0.5222734467680338, 'std': 0.21811182151419323, 'min': 0.0, '25%': 0.35284541721511353, '50%': 0.514829793890847, '75%': 0.6890506806831516, 'max': 1.0}, ' Cash/Total Assets': {'count': 6819.0, 'mean': 0.12409456048965214, 'std': 0.13925058358332645, 'min': 0.0, '25%': 0.03354322123979425, '50%': 0.0748874639354301, '75%': 0.1610731518633315, 'max': 1.0}, ' Quick Assets/Current Liability': {'count': 6819.0, 'mean': 3592902.1968296515, 'std': 171620908.60682163, 'min': 0.0, '25%': 0.00523977582664085, '50%': 0.0079088979804512, '75%': 0.0129509103075746, 'max': 8820000000.0}, ' Cash/Current Liability': {'count': 6819.0, 'mean': 37159994.147133335, 'std': 510350903.16273063, 'min': 0.0, '25%': 0.0019730075415488497, '50%': 0.0049038864700734, '75%': 0.0128055731079178, 'max': 9650000000.0}, ' Current Liability to Assets': {'count': 6819.0, 'mean': 0.0906727945676238, 'std': 0.05028985666891821, 'min': 0.0, '25%': 0.0533012764320206, '50%': 0.0827047949822228, '75%': 0.1195229934695275, 'max': 1.0}, ' Operating Funds to Liability': {'count': 6819.0, 'mean': 0.35382800412158655, 'std': 0.035147184179188065, 'min': 0.0, '25%': 0.34102297735578047, '50%': 0.348596657106137, '75%': 0.3609148870133705, 'max': 1.0}, ' Inventory/Working Capital': {'count': 6819.0, 'mean': 0.27739510610233165, 'std': 0.010468846972945228, 'min': 0.0, '25%': 0.2770339694810945, '50%': 0.277177699032242, '75%': 0.2774287054274715, 'max': 1.0}, ' Inventory/Current Liability': {'count': 6819.0, 'mean': 55806804.52577958, 'std': 582051554.6194191, 'min': 0.0, '25%': 0.0031631476746991002, '50%': 0.0064973353534734, '75%': 0.011146766748190151, 'max': 9910000000.0}, ' Current Liabilities/Liability': {'count': 6819.0, 'mean': 0.7615988775853332, 'std': 0.20667676768344223, 'min': 0.0, '25%': 0.6269807662218725, '50%': 0.806881404713333, '75%': 0.942026693700069, 'max': 1.0}, ' Working Capital/Equity': {'count': 6819.0, 'mean': 0.7358165257322183, 'std': 0.011678026475599061, 'min': 0.0, '25%': 0.733611818564342, '50%': 0.736012732265696, '75%': 0.738559910578823, 'max': 1.0}, ' Current Liabilities/Equity': 
{'count': 6819.0, 'mean': 0.33140980061698827, 'std': 0.013488027908897866, 'min': 0.0, '25%': 0.328095841686878, '50%': 0.329685133135929, '75%': 0.332322404809702, 'max': 1.0}, ' Long-term Liability to Current Assets': {'count': 6819.0, 'mean': 54160038.13589435, 'std': 570270621.9592104, 'min': 0.0, '25%': 0.0, '50%': 0.0019746187761809, '75%': 0.009005945944256601, 'max': 9540000000.0}, ' Retained Earnings to Total Assets': {'count': 6819.0, 'mean': 0.9347327541270043, 'std': 0.025564221690643103, 'min': 0.0, '25%': 0.9310965081459854, '50%': 0.937672322031461, '75%': 0.9448112860939986, 'max': 1.0}, ' Total income/Total expense': {'count': 6819.0, 'mean': 0.002548945567386416, 'std': 0.01209281469621801, 'min': 0.0, '25%': 0.0022355962096577498, '50%': 0.0023361709310448, '75%': 0.0024918511193838, 'max': 1.0}, ' Total expense/Assets': {'count': 6819.0, 'mean': 0.02918409925586063, 'std': 0.02714877679286165, 'min': 0.0, '25%': 0.01456705658927065, '50%': 0.0226739487842648, '75%': 0.035930137895265155, 'max': 1.0}, ' Current Asset Turnover Rate': {'count': 6819.0, 'mean': 1195855763.3089354, 'std': 2821161238.262308, 'min': 0.0, '25%': 0.00014562362973865, '50%': 0.0001987815566631, '75%': 0.0004525945407579, 'max': 10000000000.0}, ' Quick Asset Turnover Rate': {'count': 6819.0, 'mean': 2163735272.034426, 'std': 3374944402.166023, 'min': 0.0, '25%': 0.00014171486236355001, '50%': 0.0002247727878357, '75%': 4900000000.0, 'max': 10000000000.0}, ' Working capitcal Turnover Rate': {'count': 6819.0, 'mean': 0.5940062655659162, 'std': 0.008959384178922204, 'min': 0.0, '25%': 0.5939344215587965, '50%': 0.593962767104877, '75%': 0.5940023454696105, 'max': 1.0}, ' Cash Turnover Rate': {'count': 6819.0, 'mean': 2471976967.444318, 'std': 2938623226.6788445, 'min': 0.0, '25%': 0.0002735337396781, '50%': 1080000000.0, '75%': 4510000000.0, 'max': 10000000000.0}, ' Cash Flow to Sales': {'count': 6819.0, 'mean': 0.6715307810992098, 'std': 0.0093413456183006, 'min': 0.0, '25%': 0.671565259253275, '50%': 0.671573958092574, '75%': 0.671586580417158, 'max': 1.0}, ' Fixed Assets to Assets': {'count': 6819.0, 'mean': 1220120.5015895537, 'std': 100754158.71316808, 'min': 0.0, '25%': 0.0853603651897917, '50%': 0.196881048224411, '75%': 0.3721999782647555, 'max': 8320000000.0}, ' Current Liability to Liability': {'count': 6819.0, 'mean': 0.7615988775853332, 'std': 0.20667676768344223, 'min': 0.0, '25%': 0.6269807662218725, '50%': 0.806881404713333, '75%': 0.942026693700069, 'max': 1.0}, ' Current Liability to Equity': {'count': 6819.0, 'mean': 0.33140980061698827, 'std': 0.013488027908897866, 'min': 0.0, '25%': 0.328095841686878, '50%': 0.329685133135929, '75%': 0.332322404809702, 'max': 1.0}, ' Equity to Long-term Liability': {'count': 6819.0, 'mean': 0.11564465149636942, 'std': 0.019529176275314197, 'min': 0.0, '25%': 0.110933233663468, '50%': 0.112340004024972, '75%': 0.117106091075626, 'max': 1.0}, ' Cash Flow to Total Assets': {'count': 6819.0, 'mean': 0.6497305901792364, 'std': 0.04737213191450496, 'min': 0.0, '25%': 0.633265319013864, '50%': 0.645366460270721, '75%': 0.6630618534616091, 'max': 1.0}, ' Cash Flow to Liability': {'count': 6819.0, 'mean': 0.4618492532922571, 'std': 0.029942680345244794, 'min': 0.0, '25%': 0.4571164765642225, '50%': 0.459750137932885, '75%': 0.46423584697152853, 'max': 1.0}, ' CFO to Assets': {'count': 6819.0, 'mean': 0.5934150861096208, 'std': 0.05856055014224858, 'min': 0.0, '25%': 0.5659869401753586, '50%': 0.593266274083544, '75%': 0.6247688757833555, 'max': 1.0}, ' 
Cash Flow to Equity': {'count': 6819.0, 'mean': 0.3155823898995751, 'std': 0.01296089240164725, 'min': 0.0, '25%': 0.312994699600273, '50%': 0.314952752072916, '75%': 0.317707188742567, 'max': 1.0}, ' Current Liability to Current Assets': {'count': 6819.0, 'mean': 0.031506365747440736, 'std': 0.030844688453563848, 'min': 0.0, '25%': 0.018033665707965, '50%': 0.0275971428517009, '75%': 0.0383746158541899, 'max': 1.0}, ' Liability-Assets Flag': {'count': 6819.0, 'mean': 0.001173192550227306, 'std': 0.034234310865302146, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, ' Net Income to Total Assets': {'count': 6819.0, 'mean': 0.8077602200365486, 'std': 0.040332191531426226, 'min': 0.0, '25%': 0.7967498491931705, '50%': 0.810619042075101, '75%': 0.8264545295408715, 'max': 1.0}, ' Total assets to GNP price': {'count': 6819.0, 'mean': 18629417.81183602, 'std': 376450059.7458224, 'min': 0.0, '25%': 0.0009036204813306, '50%': 0.0020852127088157, '75%': 0.0052697768568805, 'max': 9820000000.0}, ' No-credit Interval': {'count': 6819.0, 'mean': 0.623914574767534, 'std': 0.012289548007412282, 'min': 0.0, '25%': 0.623636304973909, '50%': 0.623879225987712, '75%': 0.6241681927893561, 'max': 1.0}, ' Gross Profit to Sales': {'count': 6819.0, 'mean': 0.607946340270717, 'std': 0.016933807795673647, 'min': 0.0, '25%': 0.6004428952063054, '50%': 0.605998288167218, '75%': 0.613913271038147, 'max': 1.0}, " Net Income to Stockholder's Equity": {'count': 6819.0, 'mean': 0.8404020646301005, 'std': 0.01452252608252491, 'min': 0.0, '25%': 0.8401148040637195, '50%': 0.841178760250192, '75%': 0.8423569700412374, 'max': 1.0}, ' Liability to Equity': {'count': 6819.0, 'mean': 0.2803651538333931, 'std': 0.014463223575594045, 'min': 0.0, '25%': 0.276944242646329, '50%': 0.278777583629637, '75%': 0.2814491856088265, 'max': 1.0}, ' Degree of Financial Leverage (DFL)': {'count': 6819.0, 'mean': 0.027541119421203627, 'std': 0.01566794186642967, 'min': 0.0, '25%': 0.0267911566924924, '50%': 0.0268081258982465, '75%': 0.026913184214613348, 'max': 1.0}, ' Interest Coverage Ratio (Interest expense to EBIT)': {'count': 6819.0, 'mean': 0.5653579335465574, 'std': 0.013214239761961918, 'min': 0.0, '25%': 0.565158395757604, '50%': 0.565251928758969, '75%': 0.565724709506105, 'max': 1.0}, ' Net Income Flag': {'count': 6819.0, 'mean': 1.0, 'std': 0.0, 'min': 1.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, ' Equity to Liability': {'count': 6819.0, 'mean': 0.047578356529497656, 'std': 0.05001371618013796, 'min': 0.0, '25%': 0.024476693570910098, '50%': 0.0337976972031022, '75%': 0.052837817459331596, 'max': 1.0}} <dataframe_info> RangeIndex: 6819 entries, 0 to 6818 Data columns (total 96 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Bankrupt? 
6819 non-null int64 1 ROA(C) before interest and depreciation before interest 6819 non-null float64 2 ROA(A) before interest and % after tax 6819 non-null float64 3 ROA(B) before interest and depreciation after tax 6819 non-null float64 4 Operating Gross Margin 6819 non-null float64 5 Realized Sales Gross Margin 6819 non-null float64 6 Operating Profit Rate 6819 non-null float64 7 Pre-tax net Interest Rate 6819 non-null float64 8 After-tax net Interest Rate 6819 non-null float64 9 Non-industry income and expenditure/revenue 6819 non-null float64 10 Continuous interest rate (after tax) 6819 non-null float64 11 Operating Expense Rate 6819 non-null float64 12 Research and development expense rate 6819 non-null float64 13 Cash flow rate 6819 non-null float64 14 Interest-bearing debt interest rate 6819 non-null float64 15 Tax rate (A) 6819 non-null float64 16 Net Value Per Share (B) 6819 non-null float64 17 Net Value Per Share (A) 6819 non-null float64 18 Net Value Per Share (C) 6819 non-null float64 19 Persistent EPS in the Last Four Seasons 6819 non-null float64 20 Cash Flow Per Share 6819 non-null float64 21 Revenue Per Share (Yuan ¥) 6819 non-null float64 22 Operating Profit Per Share (Yuan ¥) 6819 non-null float64 23 Per Share Net profit before tax (Yuan ¥) 6819 non-null float64 24 Realized Sales Gross Profit Growth Rate 6819 non-null float64 25 Operating Profit Growth Rate 6819 non-null float64 26 After-tax Net Profit Growth Rate 6819 non-null float64 27 Regular Net Profit Growth Rate 6819 non-null float64 28 Continuous Net Profit Growth Rate 6819 non-null float64 29 Total Asset Growth Rate 6819 non-null float64 30 Net Value Growth Rate 6819 non-null float64 31 Total Asset Return Growth Rate Ratio 6819 non-null float64 32 Cash Reinvestment % 6819 non-null float64 33 Current Ratio 6819 non-null float64 34 Quick Ratio 6819 non-null float64 35 Interest Expense Ratio 6819 non-null float64 36 Total debt/Total net worth 6819 non-null float64 37 Debt ratio % 6819 non-null float64 38 Net worth/Assets 6819 non-null float64 39 Long-term fund suitability ratio (A) 6819 non-null float64 40 Borrowing dependency 6819 non-null float64 41 Contingent liabilities/Net worth 6819 non-null float64 42 Operating profit/Paid-in capital 6819 non-null float64 43 Net profit before tax/Paid-in capital 6819 non-null float64 44 Inventory and accounts receivable/Net value 6819 non-null float64 45 Total Asset Turnover 6819 non-null float64 46 Accounts Receivable Turnover 6819 non-null float64 47 Average Collection Days 6819 non-null float64 48 Inventory Turnover Rate (times) 6819 non-null float64 49 Fixed Assets Turnover Frequency 6819 non-null float64 50 Net Worth Turnover Rate (times) 6819 non-null float64 51 Revenue per person 6819 non-null float64 52 Operating profit per person 6819 non-null float64 53 Allocation rate per person 6819 non-null float64 54 Working Capital to Total Assets 6819 non-null float64 55 Quick Assets/Total Assets 6819 non-null float64 56 Current Assets/Total Assets 6819 non-null float64 57 Cash/Total Assets 6819 non-null float64 58 Quick Assets/Current Liability 6819 non-null float64 59 Cash/Current Liability 6819 non-null float64 60 Current Liability to Assets 6819 non-null float64 61 Operating Funds to Liability 6819 non-null float64 62 Inventory/Working Capital 6819 non-null float64 63 Inventory/Current Liability 6819 non-null float64 64 Current Liabilities/Liability 6819 non-null float64 65 Working Capital/Equity 6819 non-null float64 66 Current Liabilities/Equity 6819 non-null float64 67 
Long-term Liability to Current Assets 6819 non-null float64 68 Retained Earnings to Total Assets 6819 non-null float64 69 Total income/Total expense 6819 non-null float64 70 Total expense/Assets 6819 non-null float64 71 Current Asset Turnover Rate 6819 non-null float64 72 Quick Asset Turnover Rate 6819 non-null float64 73 Working capitcal Turnover Rate 6819 non-null float64 74 Cash Turnover Rate 6819 non-null float64 75 Cash Flow to Sales 6819 non-null float64 76 Fixed Assets to Assets 6819 non-null float64 77 Current Liability to Liability 6819 non-null float64 78 Current Liability to Equity 6819 non-null float64 79 Equity to Long-term Liability 6819 non-null float64 80 Cash Flow to Total Assets 6819 non-null float64 81 Cash Flow to Liability 6819 non-null float64 82 CFO to Assets 6819 non-null float64 83 Cash Flow to Equity 6819 non-null float64 84 Current Liability to Current Assets 6819 non-null float64 85 Liability-Assets Flag 6819 non-null int64 86 Net Income to Total Assets 6819 non-null float64 87 Total assets to GNP price 6819 non-null float64 88 No-credit Interval 6819 non-null float64 89 Gross Profit to Sales 6819 non-null float64 90 Net Income to Stockholder's Equity 6819 non-null float64 91 Liability to Equity 6819 non-null float64 92 Degree of Financial Leverage (DFL) 6819 non-null float64 93 Interest Coverage Ratio (Interest expense to EBIT) 6819 non-null float64 94 Net Income Flag 6819 non-null int64 95 Equity to Liability 6819 non-null float64 dtypes: float64(93), int64(3) memory usage: 5.0 MB <some_examples> {'Bankrupt?': {'0': 1, '1': 1, '2': 1, '3': 1}, ' ROA(C) before interest and depreciation before interest': {'0': 0.3705942573, '1': 0.4642909375, '2': 0.4260712719, '3': 0.3998440014}, ' ROA(A) before interest and % after tax': {'0': 0.4243894461, '1': 0.53821413, '2': 0.4990187527, '3': 0.4512647187}, ' ROA(B) before interest and depreciation after tax': {'0': 0.4057497725, '1': 0.5167300177, '2': 0.4722950907, '3': 0.4577332834}, ' Operating Gross Margin': {'0': 0.6014572133, '1': 0.6102350855, '2': 0.6014500065, '3': 0.5835411292}, ' Realized Sales Gross Margin': {'0': 0.6014572133, '1': 0.6102350855, '2': 0.601363525, '3': 0.5835411292}, ' Operating Profit Rate': {'0': 0.9989692032, '1': 0.9989459782, '2': 0.9988573535, '3': 0.9986997471}, ' Pre-tax net Interest Rate': {'0': 0.7968871459, '1': 0.7973801913, '2': 0.7964033693, '3': 0.7969669683}, ' After-tax net Interest Rate': {'0': 0.8088093609, '1': 0.8093007257, '2': 0.8083875215, '3': 0.8089655977}, ' Non-industry income and expenditure/revenue': {'0': 0.3026464339, '1': 0.3035564303, '2': 0.3020351773, '3': 0.303349536}, ' Continuous interest rate (after tax)': {'0': 0.7809848502, '1': 0.7815059743, '2': 0.7802839362, '3': 0.7812409912}, ' Operating Expense Rate': {'0': 0.0001256969, '1': 0.0002897851, '2': 0.0002361297, '3': 0.0001078888}, ' Research and development expense rate': {'0': 0.0, '1': 0.0, '2': 25500000.0, '3': 0.0}, ' Cash flow rate': {'0': 0.4581431435, '1': 0.4618672572, '2': 0.4585205875, '3': 0.4657054427}, ' Interest-bearing debt interest rate': {'0': 0.0007250725, '1': 0.0006470647, '2': 0.000790079, '3': 0.0004490449}, ' Tax rate (A)': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, ' Net Value Per Share (B)': {'0': 0.1479499389, '1': 0.182251064, '2': 0.1779107497, '3': 0.1541865071}, ' Net Value Per Share (A)': {'0': 0.1479499389, '1': 0.182251064, '2': 0.1779107497, '3': 0.1541865071}, ' Net Value Per Share (C)': {'0': 0.1479499389, '1': 0.182251064, '2': 0.193712865, '3': 0.1541865071}, ' 
Persistent EPS in the Last Four Seasons': {'0': 0.1691405881, '1': 0.208943935, '2': 0.1805805049, '3': 0.1937222275}, ' Cash Flow Per Share': {'0': 0.3116644267, '1': 0.3181368041, '2': 0.3071019311, '3': 0.3216736224}, ' Revenue Per Share (Yuan ¥)': {'0': 0.0175597804, '1': 0.021144335, '2': 0.0059440083, '3': 0.014368468}, ' Operating Profit Per Share (Yuan ¥)': {'0': 0.0959205276, '1': 0.0937220096, '2': 0.0923377575, '3': 0.0777623972}, ' Per Share Net profit before tax (Yuan ¥)': {'0': 0.1387361603, '1': 0.1699179031, '2': 0.1428033441, '3': 0.148602847}, ' Realized Sales Gross Profit Growth Rate': {'0': 0.0221022784, '1': 0.0220801699, '2': 0.0227600968, '3': 0.0220460669}, ' Operating Profit Growth Rate': {'0': 0.8481949945, '1': 0.8480878838, '2': 0.8480940128, '3': 0.8480054774}, ' After-tax Net Profit Growth Rate': {'0': 0.6889794628, '1': 0.6896929012, '2': 0.689462677, '3': 0.6891095356}, ' Regular Net Profit Growth Rate': {'0': 0.6889794628, '1': 0.6897017016, '2': 0.6894696596, '3': 0.6891095356}, ' Continuous Net Profit Growth Rate': {'0': 0.2175353862, '1': 0.2176195965, '2': 0.217601299, '3': 0.2175681883}, ' Total Asset Growth Rate': {'0': 4980000000.0, '1': 6110000000.0, '2': 7280000000.0, '3': 4880000000.0}, ' Net Value Growth Rate': {'0': 0.0003269773, '1': 0.0004430401, '2': 0.0003964253, '3': 0.0003824259}, ' Total Asset Return Growth Rate Ratio': {'0': 0.2630999837, '1': 0.2645157781, '2': 0.2641839756, '3': 0.2633711759}, ' Cash Reinvestment %': {'0': 0.363725271, '1': 0.376709139, '2': 0.3689132298, '3': 0.3840765992}, ' Current Ratio': {'0': 0.0022589633, '1': 0.0060162059, '2': 0.0115425537, '3': 0.0041940587}, ' Quick Ratio': {'0': 0.0012077551, '1': 0.0040393668, '2': 0.0053475602, '3': 0.0028964911}, ' Interest Expense Ratio': {'0': 0.629951302, '1': 0.6351724634, '2': 0.6296314434, '3': 0.630228353}, ' Total debt/Total net worth': {'0': 0.0212659244, '1': 0.0125023938, '2': 0.021247686, '3': 0.0095724017}, ' Debt ratio %': {'0': 0.2075762615, '1': 0.1711763461, '2': 0.2075157965, '3': 0.151464764}, ' Net worth/Assets': {'0': 0.7924237385, '1': 0.8288236539, '2': 0.7924842035, '3': 0.848535236}, ' Long-term fund suitability ratio (A)': {'0': 0.0050244547, '1': 0.0050588818, '2': 0.0050998994, '3': 0.0050469241}, ' Borrowing dependency': {'0': 0.3902843544, '1': 0.37676002, '2': 0.3790929201, '3': 0.3797426876}, ' Contingent liabilities/Net worth': {'0': 0.0064785025, '1': 0.0058350395, '2': 0.0065619821, '3': 0.0053658477}, ' Operating profit/Paid-in capital': {'0': 0.095884834, '1': 0.0937433843, '2': 0.0923184653, '3': 0.0777272949}, ' Net profit before tax/Paid-in capital': {'0': 0.1377573335, '1': 0.1689616168, '2': 0.1480355931, '3': 0.1475605158}, ' Inventory and accounts receivable/Net value': {'0': 0.3980356983, '1': 0.3977248825, '2': 0.406580451, '3': 0.3979245013}, ' Total Asset Turnover': {'0': 0.0869565217, '1': 0.0644677661, '2': 0.0149925037, '3': 0.0899550225}, ' Accounts Receivable Turnover': {'0': 0.0018138841, '1': 0.0012863563, '2': 0.0014953385, '3': 0.0019660556}, ' Average Collection Days': {'0': 0.0034873643, '1': 0.0049168079, '2': 0.0042268495, '3': 0.0032149673}, ' Inventory Turnover Rate (times)': {'0': 0.0001820926, '1': 9360000000.0, '2': 65000000.0, '3': 7130000000.0}, ' Fixed Assets Turnover Frequency': {'0': 0.0001165007, '1': 719000000.0, '2': 2650000000.0, '3': 9150000000.0}, ' Net Worth Turnover Rate (times)': {'0': 0.0329032258, '1': 0.025483871, '2': 0.0133870968, '3': 0.0280645161}, ' Revenue per person': {'0': 
0.034164182, '1': 0.0068886506, '2': 0.0289969596, '3': 0.0154634784}, ' Operating profit per person': {'0': 0.3929128695, '1': 0.3915899686, '2': 0.3819678433, '3': 0.3784966419}, ' Allocation rate per person': {'0': 0.0371353016, '1': 0.0123349721, '2': 0.1410163119, '3': 0.0213199897}, ' Working Capital to Total Assets': {'0': 0.6727752925, '1': 0.751110917, '2': 0.8295019149, '3': 0.7257541797}, ' Quick Assets/Total Assets': {'0': 0.1666729588, '1': 0.1272360023, '2': 0.3402008785, '3': 0.1615745316}, ' Current Assets/Total Assets': {'0': 0.1906429591, '1': 0.1824190541, '2': 0.6028057017, '3': 0.2258148689}, ' Cash/Total Assets': {'0': 0.004094406, '1': 0.014947727, '2': 0.0009909445, '3': 0.0188506248}, ' Quick Assets/Current Liability': {'0': 0.0019967709, '1': 0.0041360298, '2': 0.0063024814, '3': 0.0029612377}, ' Cash/Current Liability': {'0': 0.000147336, '1': 0.0013839101, '2': 5340000000.0, '3': 0.0010106464}, ' Current Liability to Assets': {'0': 0.1473084504, '1': 0.0569628274, '2': 0.0981620645, '3': 0.0987146304}, ' Operating Funds to Liability': {'0': 0.3340151713, '1': 0.341105992, '2': 0.3367314947, '3': 0.348716439}, ' Inventory/Working Capital': {'0': 0.2769201582, '1': 0.2896415764, '2': 0.2774555281, '3': 0.2765803042}, ' Inventory/Current Liability': {'0': 0.00103599, '1': 0.0052096824, '2': 0.0138787858, '3': 0.0035401479}, ' Current Liabilities/Liability': {'0': 0.6762691762, '1': 0.308588593, '2': 0.4460274872, '3': 0.6158483686}, ' Working Capital/Equity': {'0': 0.7212745515, '1': 0.7319752885, '2': 0.7427286376, '3': 0.7298249087}, ' Current Liabilities/Equity': {'0': 0.3390770068, '1': 0.3297401479, '2': 0.3347768513, '3': 0.3315089787}, ' Long-term Liability to Current Assets': {'0': 0.025592368, '1': 0.0239468187, '2': 0.0037151157, '3': 0.0221651997}, ' Retained Earnings to Total Assets': {'0': 0.9032247712, '1': 0.9310652176, '2': 0.9099033625, '3': 0.9069021588}, ' Total income/Total expense': {'0': 0.002021613, '1': 0.0022256083, '2': 0.0020600706, '3': 0.0018313586}, ' Total expense/Assets': {'0': 0.0648557077, '1': 0.02551586, '2': 0.0213874282, '3': 0.0241610702}, ' Current Asset Turnover Rate': {'0': 701000000.0, '1': 0.0001065198, '2': 0.0017910937, '3': 8140000000.0}, ' Quick Asset Turnover Rate': {'0': 6550000000.0, '1': 7700000000.0, '2': 0.0010226765, '3': 6050000000.0}, ' Working capitcal Turnover Rate': {'0': 0.593830504, '1': 0.5939155479, '2': 0.5945018513, '3': 0.5938887926}, ' Cash Turnover Rate': {'0': 458000000.0, '1': 2490000000.0, '2': 761000000.0, '3': 2030000000.0}, ' Cash Flow to Sales': {'0': 0.6715676536, '1': 0.6715699423, '2': 0.6715713218, '3': 0.6715191702}, ' Fixed Assets to Assets': {'0': 0.4242057622, '1': 0.4688281283, '2': 0.2761792222, '3': 0.5591439633}, ' Current Liability to Liability': {'0': 0.6762691762, '1': 0.308588593, '2': 0.4460274872, '3': 0.6158483686}, ' Current Liability to Equity': {'0': 0.3390770068, '1': 0.3297401479, '2': 0.3347768513, '3': 0.3315089787}, ' Equity to Long-term Liability': {'0': 0.1265494878, '1': 0.1209161058, '2': 0.1179223194, '3': 0.1207604553}, ' Cash Flow to Total Assets': {'0': 0.6375553953, '1': 0.6410999847, '2': 0.6427645502, '3': 0.5790393123}, ' Cash Flow to Liability': {'0': 0.4586091477, '1': 0.4590010533, '2': 0.4592540355, '3': 0.4485179116}, ' CFO to Assets': {'0': 0.5203819179, '1': 0.5671013087, '2': 0.5384905396, '3': 0.6041050562}, ' Cash Flow to Equity': {'0': 0.3129049481, '1': 0.3141631352, '2': 0.3145154263, '3': 0.3023822548}, ' Current Liability to Current 
Assets': {'0': 0.1182504766, '1': 0.0477752816, '2': 0.0253464891, '3': 0.0672496173}, ' Liability-Assets Flag': {'0': 0, '1': 0, '2': 0, '3': 0}, ' Net Income to Total Assets': {'0': 0.7168453432, '1': 0.795297136, '2': 0.774669697, '3': 0.7395545252}, ' Total assets to GNP price': {'0': 0.00921944, '1': 0.0083233018, '2': 0.0400028529, '3': 0.0032524753}, ' No-credit Interval': {'0': 0.6228789594, '1': 0.6236517417, '2': 0.6238410376, '3': 0.6229287091}, ' Gross Profit to Sales': {'0': 0.6014532901, '1': 0.6102365259, '2': 0.6014493405, '3': 0.5835376122}, " Net Income to Stockholder's Equity": {'0': 0.827890214, '1': 0.839969268, '2': 0.8367743086, '3': 0.8346971068}, ' Liability to Equity': {'0': 0.2902018928, '1': 0.2838459798, '2': 0.2901885329, '3': 0.281721193}, ' Degree of Financial Leverage (DFL)': {'0': 0.0266006308, '1': 0.2645768198, '2': 0.0265547199, '3': 0.0266966344}, ' Interest Coverage Ratio (Interest expense to EBIT)': {'0': 0.5640501123, '1': 0.5701749464, '2': 0.5637060765, '3': 0.5646634203}, ' Net Income Flag': {'0': 1, '1': 1, '2': 1, '3': 1}, ' Equity to Liability': {'0': 0.0164687409, '1': 0.0207943063, '2': 0.0164741143, '3': 0.0239823322}} <end_description>
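A note on the description above: the target `Bankrupt?` has a mean of roughly 0.032 across 6,819 rows, so only about 3% of companies are labeled bankrupt. A minimal sketch of loading the file and confirming that imbalance, assuming the `company-bankruptcy-prediction/data.csv` path given in the description:

import pandas as pd

# Load the bankruptcy dataset described above (path taken from the description).
df = pd.read_csv("company-bankruptcy-prediction/data.csv")

# The summary reports 6819 rows and 96 columns with no missing values.
print(df.shape)

# Roughly 3.2% of rows are labeled bankrupt, per the target's mean, so plain
# accuracy would be a misleading evaluation metric on this data.
print(df["Bankrupt?"].value_counts(normalize=True))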
3,628
0
9,875
3,628
129667076
# Import pandas, pyplot from matplotlib, and seaborn.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Import the statsmodels module.
import statsmodels.api as sm

# Import the ols function from statsmodels.
from statsmodels.formula.api import ols

# Load the data.
data = pd.read_csv(
    "/kaggle/input/modified-marketing-and-sales-data/modified_marketing_and_sales_data.csv"
)

# Display the first five rows.
data.head()

# Display the shape of the data as a tuple (rows, columns).
data.shape

# Generate descriptive statistics about TV, Radio, and Social_Media.
data[["TV", "Radio", "Social_Media"]].describe()

# Calculate the rate of missing values in the Sales column.
missing_sales = data.Sales.isna().mean()

# Convert missing_sales from a decimal to a percentage and round to 2 decimal places.
missing_sales = round(missing_sales * 100, 2)

# Display the result (missing_sales must be converted to a string to be
# concatenated in the print statement).
print("Percentage of promotions missing Sales: " + str(missing_sales) + "%")

# Subset the data to include only rows where Sales is present.
data = data.dropna(subset=["Sales"], axis=0)

# Create a histogram of Sales.
fig = sns.histplot(data["Sales"])

# Add a title.
fig.set_title("Distribution of Sales")

# Create a pairplot of the data.
sns.pairplot(data)

# Define the OLS formula.
ols_formula = "Sales ~ TV"

# Create an OLS model.
OLS = ols(formula=ols_formula, data=data)

# Fit the model.
model = OLS.fit()

# Save the results summary.
model_results = model.summary()

# Display the model results.
model_results

# Create a scatterplot comparing TV (X) and Sales (Y).
sns.scatterplot(x=data["TV"], y=data["Sales"])

# Calculate the residuals.
residuals = model.resid

# Create a 1x2 plot figure.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))

# Create a histogram of the residuals.
sns.histplot(residuals, ax=axes[0])

# Set the x label of the residual plot.
axes[0].set_xlabel("Residual Value")

# Set the title of the residual plot.
axes[0].set_title("Histogram of Residuals")

# Create a Q-Q plot of the residuals.
sm.qqplot(residuals, line="s", ax=axes[1])

# Set the title of the Q-Q plot.
axes[1].set_title("Normal Q-Q plot")

# Use matplotlib's tight_layout() function to add space between the plots
# for a cleaner appearance.
plt.tight_layout()

# Show the plot.
plt.show()

# Create a scatterplot of the fitted values from the model against the residuals.
fig = sns.scatterplot(x=model.fittedvalues, y=model.resid)

# Set the x-axis label.
fig.set_xlabel("Fitted Values")

# Set the y-axis label.
fig.set_ylabel("Residuals")

# Set the title.
fig.set_title("Fitted Values v. Residuals")

# Add a line at y = 0 to visualize the variance of residuals above and below 0.
fig.axhline(0)

# Show the plot.
plt.show()

# Display the model_results defined previously.
model_results
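# As a follow-up to the regression summary above, here is a minimal, hedged
# sketch of reading the fitted "Sales ~ TV" line out of `model` and using it
# for a point prediction. It assumes the OLS fit above ran successfully; the
# TV budget of 50 is an arbitrary illustrative value, not one taken from the
# original data.
beta_0 = model.params["Intercept"]
beta_1 = model.params["TV"]
print(f"Estimated line: Sales = {beta_0:.3f} + {beta_1:.3f} * TV")

# The formula API can also predict straight from a DataFrame of new rows.
new_obs = pd.DataFrame({"TV": [50.0]})
print("Predicted Sales:", model.predict(new_obs).iloc[0])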
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/667/129667076.ipynb
null
null
[{"Id": 129667076, "ScriptId": 38559248, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/15/2023 15:36:34", "VersionNumber": 2.0, "Title": "Evaluating Simple Linear Regression in Python", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 116.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 116.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
890
0
890
890
129610284
# imports
import tensorflow as tf

print(tf.__version__)
import transformers
import os
import shutil
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from transformers import (
    AutoModelForSequenceClassification,
    TFAutoModelForSequenceClassification,
    TFBertForSequenceClassification,
)
from transformers import AutoTokenizer
from transformers import BertTokenizerFast
from transformers import BertTokenizer


# Import and sanity-check the data.
def import_and_check_data():
    from sklearn.utils import shuffle

    df = pd.read_csv(
        "/kaggle/input/14-data/1.4.csv", header=None
    )  # 1.4 is Waseem's dataset
    df.dropna(inplace=True)
    df = df.rename(columns={0: "tweet", 1: "label"})
    df = shuffle(df, random_state=64)
    df.reset_index(inplace=True)
    df.drop("index", inplace=True, axis=1)
    print(df["label"].value_counts())
    return df


df = import_and_check_data()
df


def train_and_test_split(text, label):
    from sklearn.model_selection import train_test_split

    x_train, x_test, y_train, y_test = train_test_split(
        text, label, test_size=0.2, random_state=32
    )
    print("len of x_train ", len(x_train))
    print("len of x_test ", len(x_test))
    # return list(x_train), list(x_test), list(y_train), list(y_test)
    return x_train, x_test, y_train, y_test


x_train, x_test, y_train, y_test = train_and_test_split(df["tweet"], df["label"])
type(x_test)


def train_and_test_ds(x_train, x_test, y_train, y_test):
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")

    # Training dataset.
    x_train = list(x_train.astype(str))
    y_train = list(y_train)
    train_encodings = tokenizer(x_train, padding=True, truncation=True)
    train_ds = tf.data.Dataset.from_tensor_slices((dict(train_encodings), y_train))
    train_ds = train_ds.batch(1)

    # Test dataset.
    x_test, y_test = list(x_test.astype(str)), list(y_test)
    test_encodings = tokenizer(x_test, padding=True, truncation=True)
    test_ds = tf.data.Dataset.from_tensor_slices((dict(test_encodings), y_test))
    test_ds = test_ds.batch(1)
    return train_ds, test_ds


train_ds, test_ds = train_and_test_ds(x_train, x_test, y_train, y_test)


def create_model():
    model = TFBertForSequenceClassification.from_pretrained(
        "bert-base-cased", num_labels=2, id2label={0: "general", 1: "toxic"}
    )
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=2e-5, decay_steps=10000, decay_rate=0.9
    )
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=tf.metrics.SparseCategoricalAccuracy(),
    )
    print(model.summary())
    return model


model = create_model()
# from tensorflow import keras
# model = keras.models.load_model('/content/drive/MyDrive/waseem')
model.summary()

model.fit(train_ds, epochs=3)
# model.save('/kaggle/working/')


def get_predictions(model, ds):
    test_probs = []
    train_probs = []
    from sklearn.metrics import classification_report

    y_eval = model.predict(ds)
    l = y_eval["logits"]
    preds = []
    for i in l:
        prediction = tf.round(tf.nn.sigmoid(i))
        print("pred = ", prediction)
        res = np.argmax(prediction, 0)
        preds.append(res)
    # print(classification_report(train_test_vals, preds))
    return preds


y_train_eval = model.predict(train_ds)
l_train = y_train_eval["logits"]
train_preds = []
for i in l_train:
    prediction = tf.round(tf.nn.sigmoid(i))
    print("pred = ", prediction)
    res = np.argmax(prediction, 0)
    train_preds.append(res)

y_test_eval = model.predict(test_ds)
l_test = y_test_eval["logits"]
test_preds = []
for i in l_test:
    prediction = tf.round(tf.nn.sigmoid(i))
    # print('pred = ', prediction)
    res = np.argmax(prediction, 0)
    test_preds.append(res)

from sklearn.metrics import classification_report

# print(classification_report(y_train, train_preds))
print(classification_report(y_test, test_preds))


def loss_accuracy(model, ds):
    loss, accuracy = model.evaluate(ds)
    print("loss = ", loss)
    print("accuracy = ", accuracy)
    return loss, accuracy


l, a = loss_accuracy(model, train_ds)
test_preds


def get_gradients(text, model, tokenizer):
    def get_correct_span_mask(correct_index, token_size):
        span_mask = np.zeros((1, token_size))
        span_mask[0, correct_index] = 1
        span_mask = tf.constant(span_mask, dtype="float32")
        return span_mask

    embedding_matrix = model.bert.embeddings.weights[0]
    encoded_tokens = tokenizer(text, return_tensors="tf")
    token_ids = list(encoded_tokens["input_ids"].numpy()[0])
    vocab_size = embedding_matrix.get_shape()[0]
    # Convert token ids to one-hot. We can't differentiate wrt int token ids,
    # hence the need for a one-hot representation.
    token_ids_tensor = tf.constant([token_ids], dtype="int32")
    token_ids_tensor_one_hot = tf.one_hot(token_ids_tensor, vocab_size)

    with tf.GradientTape(watch_accessed_variables=False) as tape:
        # (i) watch the one-hot input variable
        tape.watch(token_ids_tensor_one_hot)
        # Multiply the input by the model embedding matrix; this allows us to
        # do backprop wrt the one-hot input.
        inputs_embeds = tf.matmul(token_ids_tensor_one_hot, embedding_matrix)
        # (ii) get prediction
        pred_scores = model(
            {
                "inputs_embeds": inputs_embeds,
                "attention_mask": encoded_tokens["attention_mask"],
            }
        ).logits
        print("prbs = ", tf.nn.sigmoid(pred_scores))
        max_class = tf.argmax(pred_scores, axis=1).numpy()[0]
        # Get the mask for the predicted class score.
        score_mask = get_correct_span_mask(max_class, pred_scores.shape[1])
        # Zero out all predictions outside of the predicted class; we want
        # gradients wrt just this class.
        predict_correct_class = tf.reduce_sum(pred_scores * score_mask)

    # (iii) get gradient of the input with respect to the prediction class
    # (called after the tape exits recording, as required for a non-persistent tape)
    gradient_non_normalized = tf.norm(
        tape.gradient(predict_correct_class, token_ids_tensor_one_hot), axis=2
    )
    # (iv) normalize gradient scores and return them as "explanations"
    gradient_tensor = gradient_non_normalized / tf.reduce_max(gradient_non_normalized)
    gradients = gradient_tensor[0].numpy().tolist()

    token_words = tokenizer.convert_ids_to_tokens(token_ids)
    prediction_label = "toxic" if max_class == 1 else "general"
    return gradients, token_words, prediction_label


def plot_gradients(tokens, gradients, title):
    """Plot explanations."""
    import matplotlib.pyplot as plt

    plt.figure(figsize=(21, 3))
    xvals = [x + str(i) for i, x in enumerate(tokens)]
    colors = [(0, 0, 1, c) for c in gradients]
    # edgecolors = ["black" if t == 0 else (0, 0, 1, c) for c, t in zip(gradients, token_types)]
    # colors = [("r" if t == 0 else "b") for c, t in zip(gradients, token_types)]
    plt.tick_params(axis="both", which="minor", labelsize=29)
    p = plt.bar(xvals, gradients, color=colors, linewidth=1)
    plt.title(title)
    p = plt.xticks(
        ticks=[i for i in range(len(tokens))], labels=tokens, fontsize=12, rotation=90
    )


# negative_pred_neg = pd.DataFrame(npn)
# negative_pred_neg.to_excel('/content/drive/MyDrive/Colab_Models/model_1.4/neg_pred_neg.xlsx', index=False, header=None)
y_train = list(y_train)
y_test = list(y_test)
x_train = list(x_train)
x_test = list(x_test)


def neg_pred_neg(y_test, y_pred):
    npn = []
    # probs_npn = []
    # print('len ', len(y_pred))
    cnt = 1
    for i in range(0, len(y_test)):
        if y_test[i] == 1 and y_pred[i] == 1:
            cnt = cnt + 1
            npn.append(x_test[i])
            # probs_npn.append(probs_test[i])
            #
print(cnt) return npn npn = neg_pred_neg(y_test, test_preds) npn # model.save('/content/drive/MyDrive/waseem') def whole_fun(s): from scipy.special import expit tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") embedding_matrix = model.bert.embeddings.weights[0] encoded_tokens = tokenizer(s, return_tensors="tf") token_ids = list(encoded_tokens["input_ids"].numpy()[0]) vocab_size = embedding_matrix.get_shape()[0] token_ids_tensor = tf.constant([token_ids], dtype="int32") token_ids_tensor_one_hot = tf.one_hot(token_ids_tensor, vocab_size) inputs_embeds = tf.matmul(token_ids_tensor_one_hot, embedding_matrix) # (ii) get prediction pred_scores = model( { "inputs_embeds": inputs_embeds, "attention_mask": encoded_tokens["attention_mask"], } ).logits # print(pred_scores) print("prbs = ", tf.nn.sigmoid(pred_scores)) max_class = tf.argmax(pred_scores, axis=1).numpy()[0] gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") def list_of_words_to_change(words, gradients): sum_ = 0 for i in gradients: sum_ = sum_ + i avg = sum_ / len(gradients) list_of_words = [] for i in range(0, len(gradients)): if gradients[i] >= avg: list_of_words.append(words[i]) j = i + 1 while j < len(gradients): if words[j][0] == "#" and gradients[j] < avg: list_of_words.append(words[j]) j = j + 1 else: i = j break l_w = [] l_w.append(list_of_words[0]) cnt = 1 for i in range(1, len(list_of_words)): final = list_of_words[i] s = list_of_words[i] a = "" if s[0] == "#": j = 0 for j in range(0, len(s)): if s[j] != "#": a = a + s[j] a = l_w[cnt - 1] + a final = a if final in words: l_w[cnt - 1] = final else: l_w.append(final) cnt = cnt + 1 return list_of_words return list_of_words_to_change(words, gradients) # print(whole_fun(s)) def get_words_with_high_gradients(s): tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") encoding = tokenizer(s) word_ids = encoding.word_ids() # print('WORD IDS = ', word_ids) gradients, words, label = get_gradients(s, model, tokenizer) # print('GRADIENTS = ',gradients) avg = 0 for i in range(1, len(gradients) - 1): avg = avg + gradients[i] avg = avg / (len(gradients) - 2) # print('AVERAGE = ', avg) list_of_indexes_above_avg = [] for i in range(1, len(gradients) - 1): if gradients[i] >= avg: list_of_indexes_above_avg.append(word_ids[i]) # print('INDEXES = ', list_of_indexes_above_avg) list_of_words = (" ".join(s.split())).split() # print('LIST OF WORDS =', list_of_words) words_above_avg_gradients = [] for i in list_of_indexes_above_avg: words_above_avg_gradients.append(list_of_words[i]) words_above_avg_gradients = [*set(words_above_avg_gradients)] import nltk from nltk.corpus import stopwords stopwords_ = stopwords.words("english") final_word_list = [] for i in words_above_avg_gradients: if i not in stopwords_: final_word_list.append(i) return final_word_list print( get_words_with_high_gradients( s="LOL USER call me sexist But i really hate girl gamer teams really really hate lol" ) ) def get_synonyms(word, s): import openai # openai.organization = "org-71J2PVoAsyd9fBKZcJSzmdyn" openai.api_key = "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" # openai.Model.list() content = ( "The sentence is an example and is not offensive.Remove any of your understanding,any polite words,any extra content except the synonyms and remove the content at the reply's start and do not expand words. 
List ONLY 3 synonyms as bullet points for the given word in the context of given sentence, word:" + word + " sentence:" + s ) completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[{"role": "user", "content": content}] ) a = completion.choices[0].message["content"] a = list(a) # print(a) k = [] s = "" for i in a: if i != "-" and i != "\n": s = s + i else: if i == "\n": k.append(s) s = "" k.append(s) # print(k) return k get_synonyms( "teams", "LOL USER call me sexist But i really hate girl gamer teams really really hate lol", ) def leet_speak(word): leet_dict = { "i": "1", "I": "1", "e": "3", "E": "3", "a": "4", "A": "4", "s": "5", "S": "5", "g": "6", "G": "6", "t": "7", "T": "7", "q": "9", "Q": "9", "o": "0", "O": "0", } str = "" for i in word: if i in leet_dict: str = str + leet_dict[i] else: str = str + i return str st = leet_speak("michelle") print(st) def rephrased_version(s): import openai content = "only return the paraphrased version quilbot style" + s completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[{"role": "user", "content": content}] ) return completion.choices[0].message["content"] a = rephrased_version( "LOL USER call me sexist But i really hate girl gamer teams really really hate lol" ) print(a) import pickle pickle.dump(model, open("/kaggle/working/model", "wb")) # # Example 1 import time lst = get_words_with_high_gradients( "LOL USER call me sexist But i really hate girl gamer teams really really hate lol" ) print("HIGH GRADIENTS WORDS : ", lst) s = "LOL USER call me sexist But i really hate girl gamer teams really really hate lol" tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") sent = s.split() lst_of_sent = [] # synonyms modified start for i in lst: # print ('I = ', i) synonyms = get_synonyms(i, s) for j in synonyms: g = s g = g.replace(i, j) lst_of_sent.append(g) time.sleep(10) # synonyms modified end # leetspeak modified start for i in lst: leetspeak_word = leet_speak(i) g = s g = g.replace(i, leetspeak_word) lst_of_sent.append(g) # leetspeak modified end for i in (1, 1): time.sleep(10) # rephrased version start lst_of_sent.append(rephrased_version(s)) # rephrased version end for i in lst_of_sent: print(i) print("\n") # # Example 2 s = " USER Call me sexist but am I the only person who doesnt find any female comedian funny in any way at all" lst = get_words_with_high_gradients(s) print("HIGH GRADIENTS WORDS : ", lst) tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") print(get_synonyms("comedian", s)) print() print(get_synonyms("sexist", s)) print() print(get_synonyms("female", s)) print() print(get_synonyms("funny", s)) # replaced words with high gradients with synonyms s = " USER Call me discriminatory but am I the only person who doesnt find any gal stand-up comic amusing in any way at all" gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") # leetspeak s = " USER Call me sexist but am I the only person who doesnt find any female c0m3d14an funny in any way at all" gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") s = " USER Call me sexist but am I the only person who doesnt find any f3m4l3 c0m3d14an 
funny in any way at all" gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") s = " USER Call me 53x137 but am I the only person who doesnt find any f3m4l3 c0m3d14an funny in any way at all" gradients, words, label = get_gradients(s, model, tokenizer) plot_gradients(words, gradients, f"Prediction: {label} | {s} ") import nltk from nltk.corpus import stopwords print(stopwords.words("english"))
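# A note on decoding the class predictions above: rounding a sigmoid over the
# two logits and then taking argmax can misfire, because whenever both rounded
# entries are equal (both 0 or both 1) argmax always returns 0. Since the
# model is trained with SparseCategoricalCrossentropy(from_logits=True), the
# consistent decoding is argmax over the raw logits (or over a softmax).
# A minimal sketch, assuming the `model` and `test_ds` built above:
import numpy as np
import tensorflow as tf

logits = model.predict(test_ds)["logits"]
probs = tf.nn.softmax(logits, axis=-1).numpy()  # per-class probabilities
test_preds_fixed = np.argmax(logits, axis=-1)  # predicted class ids

# Pickling a Keras/Transformers model (as done above) is fragile across
# library versions; the library's own serialization is the safer route.
# Assuming the Hugging Face TF model above; the output path is illustrative:
model.save_pretrained("/kaggle/working/bert_toxicity")
# reloaded = TFBertForSequenceClassification.from_pretrained(
#     "/kaggle/working/bert_toxicity"
# )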
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/610/129610284.ipynb
null
null
[{"Id": 129610284, "ScriptId": 36728165, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7872555, "CreationDate": "05/15/2023 08:09:56", "VersionNumber": 1.0, "Title": "adversarial_attack_1", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 501.0, "LinesInsertedFromPrevious": 501.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
5,211
0
5,211
5,211
129610333
<jupyter_start><jupyter_text>Salary Prediction dataset This dataset contains information about the salaries of employees at a company. Each row represents a different employee, and the columns include information such as age, gender, education level, job title, years of experience, and salary. Columns: **Age:** This column represents the age of each employee in years. The values in this column are numeric. **Gender:** This column contains the gender of each employee, which can be either male or female. The values in this column are categorical. **Education Level:** This column contains the educational level of each employee, which can be high school, bachelor's degree, master's degree, or PhD. The values in this column are categorical. **Job Title:** This column contains the job title of each employee. The job titles can vary depending on the company and may include positions such as manager, analyst, engineer, or administrator. The values in this column are categorical. **Years of Experience:** This column represents the number of years of work experience of each employee. The values in this column are numeric. **Salary:** This column represents the annual salary of each employee in US dollars. The values in this column are numeric and can vary depending on factors such as job title, years of experience, and education level. Kaggle dataset identifier: salaly-prediction-for-beginer <jupyter_code>import pandas as pd df = pd.read_csv('salaly-prediction-for-beginer/Salary Data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 375 entries, 0 to 374 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Age 373 non-null float64 1 Gender 373 non-null object 2 Education Level 373 non-null object 3 Job Title 373 non-null object 4 Years of Experience 373 non-null float64 5 Salary 373 non-null float64 dtypes: float64(3), object(3) memory usage: 17.7+ KB <jupyter_text>Examples: { "Age": 32, "Gender": "Male", "Education Level": "Bachelor's", "Job Title": "Software Engineer", "Years of Experience": 5, "Salary": 90000 } { "Age": 28, "Gender": "Female", "Education Level": "Master's", "Job Title": "Data Analyst", "Years of Experience": 3, "Salary": 65000 } { "Age": 45, "Gender": "Male", "Education Level": "PhD", "Job Title": "Senior Manager", "Years of Experience": 15, "Salary": 150000 } { "Age": 36, "Gender": "Female", "Education Level": "Bachelor's", "Job Title": "Sales Associate", "Years of Experience": 7, "Salary": 60000 } <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import itertools from matplotlib.ticker import NullFormatter import matplotlib.ticker as ticker from sklearn import preprocessing data_df = pd.read_csv("/kaggle/input/salaly-prediction-for-beginer/Salary Data.csv") data_df.head() # Number of rows data_df.shape[0] counts = data_df.nunique() counts data_df.drop_duplicates(inplace=True) data_df data_df.describe() data_df.isna().sum() data_df.replace(" ?", np.nan, inplace=True) data_df.isna().sum() plt.figure(figsize=(30, 14)) sns.countplot(data=data_df, x="Years of Experience", hue="Education Level") plt.show() degree_t = data_df["Education Level"].value_counts() plt.figure(figsize=(14, 8)) sns.barplot(x=degree_t.index, y=degree_t.values, palette="Blues_r") plt.xticks(rotation=45) plt.show() fig, ax = plt.subplots(1, 1, figsize=(12, 7)) data_df.boxplot("Salary", "Education Level", ax=ax) plt.title("Salary and Education Level") plt.ylabel("Salary") 
plt.xticks(rotation=90) plt.show sns.kdeplot( data=data_df, x="Salary", hue="Education Level", fill=True, common_norm=False, palette="crest", alpha=0.3, linewidth=0, ) from sklearn.preprocessing import LabelEncoder data_df.dropna(inplace=True) data_df.drop_duplicates() data_df["Gender"].unique() gender_label = LabelEncoder() data_df["Gender"] = gender_label.fit_transform(data_df["Gender"]) edu_label_encoder = LabelEncoder() data_df["Education Level"] = edu_label_encoder.fit_transform(data_df["Education Level"]) job_title_encoder = LabelEncoder() data_df["Job Title"] = job_title_encoder.fit_transform(data_df["Job Title"]) data_df from pycaret.regression import * s = setup(data=data_df, target="Salary", session_id=123) best = compare_models() X = data_df.drop("Salary", axis=1) y = data_df["Salary"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.30, random_state=42 ) from lightgbm import LGBMRegressor # define the model model = LGBMRegressor() # fit the model on the whole dataset model.fit(X, y) pred = model.predict(X_test) from sklearn.metrics import mean_squared_error error1 = np.sqrt(mean_squared_error(y_test, pred)) print( "The difference between the predicted value and the actual value is Rs{:,.02f}".format( error1 ) ) from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_log_error r2 = r2_score(y_test, pred) msle = mean_squared_log_error(y_test, pred) print("R2 score: ", r2) print("Mean squared log error: ", msle) from numpy import mean from numpy import std from sklearn.datasets import make_classification from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedStratifiedKFold from matplotlib import pyplot # LightGBM Hyperparameters # explore lightgbm number of trees effect on performance def get_models(): models = dict() trees = [10, 50, 100, 500, 1000, 5000] for n in trees: models[str(n)] = LGBMRegressor(n_estimators=n) return models # evaluate a give model using cross-validation def evaluate_model(model): cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1) scores = cross_val_score( model, X, y, scoring="neg_mean_squared_error", cv=cv, n_jobs=-1 ) return scores # get the models to evaluate models = get_models() # evaluate the models and store results results, names = list(), list() for name, model in models.items(): scores = evaluate_model(model) results.append(scores) names.append(name) print(">%s %.3f (%.3f)" % (name, mean(scores), std(scores))) # plot model performance for comparison pyplot.boxplot(results, labels=names, showmeans=True) pyplot.show() # get a list of models to evaluate def get_models(): models = dict() for i in range(1, 11): models[str(i)] = LGBMRegressor(max_depth=i, num_leaves=2**i) return models # get the models to evaluate models = get_models() # evaluate the models and store results results, names = list(), list() for name, model in models.items(): scores = evaluate_model(model) results.append(scores) names.append(name) print(">%s %.3f (%.3f)" % (name, mean(scores), std(scores))) # plot model performance for comparison pyplot.boxplot(results, labels=names, showmeans=True) pyplot.show() # get a list of models to evaluate def get_models(): models = dict() rates = [0.0001, 0.001, 0.01, 0.1, 1.0] for r in rates: key = "%.4f" % r models[key] = LGBMRegressor(learning_rate=r) return models # get the models to evaluate models = get_models() # evaluate the models and store results results, names = list(), list() 
for name, model in models.items(): scores = evaluate_model(model) results.append(scores) names.append(name) print(">%s %.3f (%.3f)" % (name, mean(scores), std(scores))) # plot model performance for comparison pyplot.boxplot(results, labels=names, showmeans=True) pyplot.show()
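# Two caveats on the evaluation above. First, the LGBMRegressor is fit on the
# full (X, y) and then scored on X_test, which is a subset of those same rows,
# so the reported RMSE and R2 are optimistic; fitting on X_train alone gives
# an honest hold-out score. Second, RepeatedStratifiedKFold stratifies on
# class labels and is not suited to a continuous target like Salary (it either
# rejects the target type or fails because near-unique salary "classes" cannot
# be split across folds); RepeatedKFold is the regression counterpart.
# A minimal sketch under those assumptions:
from lightgbm import LGBMRegressor
from sklearn.model_selection import RepeatedKFold, cross_val_score

honest_model = LGBMRegressor()
honest_model.fit(X_train, y_train)  # fit on the training split only
honest_pred = honest_model.predict(X_test)  # score on truly unseen rows

cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(
    LGBMRegressor(), X, y, scoring="neg_mean_squared_error", cv=cv, n_jobs=-1
)
print("CV MSE: %.3f" % -scores.mean())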
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/610/129610333.ipynb
salaly-prediction-for-beginer
rkiattisak
[{"Id": 129610333, "ScriptId": 38541588, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9110340, "CreationDate": "05/15/2023 08:10:15", "VersionNumber": 2.0, "Title": "Salary Prediction using LGBM regression", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 184.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 183.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185854610, "KernelVersionId": 129610333, "SourceDatasetVersionId": 5119031}]
[{"Id": 5119031, "DatasetId": 2973025, "DatasourceVersionId": 5190384, "CreatorUserId": 12117120, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "03/07/2023 02:45:11", "VersionNumber": 1.0, "Title": "Salary Prediction dataset", "Slug": "salaly-prediction-for-beginer", "Subtitle": "Salaries of employees at a company", "Description": "This dataset contains information about the salaries of employees at a company. Each row represents a different employee, and the columns include information such as age, gender, education level, job title, years of experience, and salary.\n\nColumns:\n\n**Age:** This column represents the age of each employee in years. The values in this column are numeric.\n\n**Gender:** This column contains the gender of each employee, which can be either male or female. The values in this column are categorical.\n\n**Education Level:** This column contains the educational level of each employee, which can be high school, bachelor's degree, master's degree, or PhD. The values in this column are categorical.\n\n**Job Title:** This column contains the job title of each employee. The job titles can vary depending on the company and may include positions such as manager, analyst, engineer, or administrator. The values in this column are categorical.\n\n**Years of Experience:** This column represents the number of years of work experience of each employee. The values in this column are numeric.\n\n**Salary:** This column represents the annual salary of each employee in US dollars. The values in this column are numeric and can vary depending on factors such as job title, years of experience, and education level.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2973025, "CreatorUserId": 12117120, "OwnerUserId": 12117120.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5119031.0, "CurrentDatasourceVersionId": 5190384.0, "ForumId": 3011486, "Type": 2, "CreationDate": "03/07/2023 02:45:11", "LastActivityDate": "03/07/2023", "TotalViews": 26520, "TotalDownloads": 5500, "TotalVotes": 84, "TotalKernels": 19}]
[{"Id": 12117120, "UserName": "rkiattisak", "DisplayName": "Kiattisak Rattanaporn", "RegisterDate": "10/27/2022", "PerformanceTier": 2}]
[{"salaly-prediction-for-beginer/Salary Data.csv": {"column_names": "[\"Age\", \"Gender\", \"Education Level\", \"Job Title\", \"Years of Experience\", \"Salary\"]", "column_data_types": "{\"Age\": \"float64\", \"Gender\": \"object\", \"Education Level\": \"object\", \"Job Title\": \"object\", \"Years of Experience\": \"float64\", \"Salary\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 375 entries, 0 to 374\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Age 373 non-null float64\n 1 Gender 373 non-null object \n 2 Education Level 373 non-null object \n 3 Job Title 373 non-null object \n 4 Years of Experience 373 non-null float64\n 5 Salary 373 non-null float64\ndtypes: float64(3), object(3)\nmemory usage: 17.7+ KB\n", "summary": "{\"Age\": {\"count\": 373.0, \"mean\": 37.43163538873995, \"std\": 7.069072938567494, \"min\": 23.0, \"25%\": 31.0, \"50%\": 36.0, \"75%\": 44.0, \"max\": 53.0}, \"Years of Experience\": {\"count\": 373.0, \"mean\": 10.03083109919571, \"std\": 6.557007136414237, \"min\": 0.0, \"25%\": 4.0, \"50%\": 9.0, \"75%\": 15.0, \"max\": 25.0}, \"Salary\": {\"count\": 373.0, \"mean\": 100577.34584450402, \"std\": 48240.013481882655, \"min\": 350.0, \"25%\": 55000.0, \"50%\": 95000.0, \"75%\": 140000.0, \"max\": 250000.0}}", "examples": "{\"Age\":{\"0\":32.0,\"1\":28.0,\"2\":45.0,\"3\":36.0},\"Gender\":{\"0\":\"Male\",\"1\":\"Female\",\"2\":\"Male\",\"3\":\"Female\"},\"Education Level\":{\"0\":\"Bachelor's\",\"1\":\"Master's\",\"2\":\"PhD\",\"3\":\"Bachelor's\"},\"Job Title\":{\"0\":\"Software Engineer\",\"1\":\"Data Analyst\",\"2\":\"Senior Manager\",\"3\":\"Sales Associate\"},\"Years of Experience\":{\"0\":5.0,\"1\":3.0,\"2\":15.0,\"3\":7.0},\"Salary\":{\"0\":90000.0,\"1\":65000.0,\"2\":150000.0,\"3\":60000.0}}"}}]
true
1
<start_data_description><data_path>salaly-prediction-for-beginer/Salary Data.csv: <column_names> ['Age', 'Gender', 'Education Level', 'Job Title', 'Years of Experience', 'Salary'] <column_types> {'Age': 'float64', 'Gender': 'object', 'Education Level': 'object', 'Job Title': 'object', 'Years of Experience': 'float64', 'Salary': 'float64'} <dataframe_Summary> {'Age': {'count': 373.0, 'mean': 37.43163538873995, 'std': 7.069072938567494, 'min': 23.0, '25%': 31.0, '50%': 36.0, '75%': 44.0, 'max': 53.0}, 'Years of Experience': {'count': 373.0, 'mean': 10.03083109919571, 'std': 6.557007136414237, 'min': 0.0, '25%': 4.0, '50%': 9.0, '75%': 15.0, 'max': 25.0}, 'Salary': {'count': 373.0, 'mean': 100577.34584450402, 'std': 48240.013481882655, 'min': 350.0, '25%': 55000.0, '50%': 95000.0, '75%': 140000.0, 'max': 250000.0}} <dataframe_info> RangeIndex: 375 entries, 0 to 374 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Age 373 non-null float64 1 Gender 373 non-null object 2 Education Level 373 non-null object 3 Job Title 373 non-null object 4 Years of Experience 373 non-null float64 5 Salary 373 non-null float64 dtypes: float64(3), object(3) memory usage: 17.7+ KB <some_examples> {'Age': {'0': 32.0, '1': 28.0, '2': 45.0, '3': 36.0}, 'Gender': {'0': 'Male', '1': 'Female', '2': 'Male', '3': 'Female'}, 'Education Level': {'0': "Bachelor's", '1': "Master's", '2': 'PhD', '3': "Bachelor's"}, 'Job Title': {'0': 'Software Engineer', '1': 'Data Analyst', '2': 'Senior Manager', '3': 'Sales Associate'}, 'Years of Experience': {'0': 5.0, '1': 3.0, '2': 15.0, '3': 7.0}, 'Salary': {'0': 90000.0, '1': 65000.0, '2': 150000.0, '3': 60000.0}} <end_description>
1,623
1
2,370
1,623
129610461
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from tensorflow.keras.losses import MeanSquaredError, CategoricalCrossentropy from tensorflow.keras.metrics import Mean, SparseCategoricalAccuracy import keras #!python3 -m pip install tensorflow-macos class Distiller(keras.Model): def __init__(self, teacher, student, temperature=3): super(Distiller, self).__init__() self.teacher = teacher self.student = student self.temperature = temperature def compile( self, optimizer, metrics, student_loss_fn, distillation_loss_fn, alpha=0.1, ): super(Distiller, self).compile(optimizer=optimizer, metrics=metrics) self.student_loss_fn = student_loss_fn self.distillation_loss_fn = distillation_loss_fn self.alpha = alpha def train_step(self, data): x, y = data # Forward pass of teacher and student teacher_predictions = self.teacher(x, training=False) student_predictions = self.student(x, training=True) # Calculate the student loss student_loss = self.student_loss_fn(y, student_predictions) # Calculate the distillation loss soft_teacher_predictions = tf.nn.softmax(teacher_predictions / self.temperature) distillation_loss = self.distillation_loss_fn( soft_teacher_predictions, tf.nn.softmax(student_predictions / self.temperature), ) # Combine the student and distillation loss with alpha weighting total_loss = (self.alpha * student_loss) + ( (1 - self.alpha) * distillation_loss ) # Calculate gradients for the student weights and perform a backward pass grads = tf.gradients(total_loss, self.student.trainable_weights) self.optimizer.apply_gradients(zip(grads, self.student.trainable_weights)) # Update the metrics self.compiled_metrics.update_state(y, student_predictions) # Return a dictionary of metric results results = {m.name: m.result() for m in self.metrics} results.update( {"student_loss": student_loss, "distillation_loss": distillation_loss} ) return results def test_step(self, data): x, y = data # Forward pass of the student y_pred = self.student(x, training=False) # Calculate the loss and update the metrics loss = self.student_loss_fn(y, y_pred) self.compiled_metrics.update_state(y, y_pred) # Return a dictionary of metric results results = {m.name: m.result() for m in self.metrics} results.update({"student_loss": loss}) return results # Define the teacher model teacher = Sequential( [ layers.Conv2D( 32, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1) ), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dense(10), ], name="teacher", ) # Train the teacher model on MNIST (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0 x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0 teacher.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) teacher.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test)) # Define the student model student = Sequential( [ layers.Conv2D( 16, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1) ), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dense(10), ], name="student", ) # Distill the teacher model into the student model distiller = Distiller(teacher=teacher, student=student, temperature=3) distiller.compile( 
optimizer=keras.optimizers.Adam(), metrics=[keras.metrics.SparseCategoricalAccuracy()], student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True), distillation_loss_fn=keras.losses.KLDivergence(), alpha=0.1, ) distiller.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test)) distiller.evaluate(x_test, y_test)
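# One caveat: tf.gradients only works in graph mode; under TF2's default eager
# execution the train_step above raises "tf.gradients is not supported when
# eager execution is enabled. Use tf.GradientTape instead." A minimal sketch of
# the same step rewritten with tf.GradientTape, intended as a drop-in body for
# Distiller.train_step (all attribute names as defined in the class above):
def train_step(self, data):
    x, y = data
    # Teacher runs in inference mode; only the student is trained.
    teacher_predictions = self.teacher(x, training=False)
    with tf.GradientTape() as tape:
        student_predictions = self.student(x, training=True)
        student_loss = self.student_loss_fn(y, student_predictions)
        distillation_loss = self.distillation_loss_fn(
            tf.nn.softmax(teacher_predictions / self.temperature),
            tf.nn.softmax(student_predictions / self.temperature),
        )
        total_loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
    # Differentiate the combined loss w.r.t. the student weights only.
    grads = tape.gradient(total_loss, self.student.trainable_weights)
    self.optimizer.apply_gradients(zip(grads, self.student.trainable_weights))
    self.compiled_metrics.update_state(y, student_predictions)
    results = {m.name: m.result() for m in self.metrics}
    results.update(
        {"student_loss": student_loss, "distillation_loss": distillation_loss}
    )
    return results


# Distiller.train_step = train_step  # patch before constructing/fitting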
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/610/129610461.ipynb
null
null
[{"Id": 129610461, "ScriptId": 38505513, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12301075, "CreationDate": "05/15/2023 08:11:16", "VersionNumber": 1.0, "Title": "DL_HW5_Kabardieva", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,241
0
1,241
1,241
129784078
<jupyter_start><jupyter_text>titanic.csv
Kaggle dataset identifier: titaniccsv
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# "Cabin" must be included in usecols, since the encoding below is built from it.
df = pd.read_csv(
    "/kaggle/input/titaniccsv/titanic.csv",
    usecols=["Pclass", "Age", "Fare", "Survived", "Cabin"],
)
df.head()

### Replace missing Cabin values with an explicit "Missing" category
df["Cabin"].fillna("Missing", inplace=True)
df.head()
df["Cabin"].unique()
len(df["Cabin"].value_counts())
# Keep only the deck letter (first character) of each cabin value
df["Cabin"] = df["Cabin"].astype(str).str[0]
df["Cabin"]
df["Cabin"].unique()
# Survival probability per deck
prob_df = df.groupby(["Cabin"])["Survived"].mean()
prob_df
prob_df = pd.DataFrame(prob_df)
prob_df
prob_df["Died"] = 1 - prob_df["Survived"]
prob_df.head()
# Probability-ratio encoding: odds of survival per deck
prob_df["Probability_ratio"] = prob_df["Survived"] / prob_df["Died"]
prob_df.head()
probability_encoded = prob_df["Probability_ratio"].to_dict()
probability_encoded
df["Cabin_encoded"] = df["Cabin"].map(probability_encoded)
df.head()
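# One robustness note on the encoding above: the ratio P(survived) / P(died)
# is undefined for any deck where every passenger survived (Died == 0). A
# small guarded sketch, assuming `prob_df` as built above; the 1e-6 floor is
# an illustrative choice, not part of the original notebook:
prob_df["Probability_ratio"] = prob_df["Survived"] / prob_df["Died"].clip(lower=1e-6)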
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/784/129784078.ipynb
titaniccsv
emrullahdemirhan
[{"Id": 129784078, "ScriptId": 33231703, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9224136, "CreationDate": "05/16/2023 12:49:15", "VersionNumber": 1.0, "Title": "Standardization_normallization", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 72.0, "LinesInsertedFromPrevious": 72.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186149234, "KernelVersionId": 129784078, "SourceDatasetVersionId": 2361242}]
[{"Id": 2361242, "DatasetId": 1264723, "DatasourceVersionId": 2403006, "CreatorUserId": 7039146, "LicenseName": "Unknown", "CreationDate": "06/22/2021 21:17:20", "VersionNumber": 4.0, "Title": "titanic.csv", "Slug": "titaniccsv", "Subtitle": NaN, "Description": NaN, "VersionNotes": "test", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1264723, "CreatorUserId": 7039146, "OwnerUserId": 7039146.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2361242.0, "CurrentDatasourceVersionId": 2403006.0, "ForumId": 1283179, "Type": 2, "CreationDate": "04/11/2021 13:41:21", "LastActivityDate": "04/11/2021", "TotalViews": 10381, "TotalDownloads": 2898, "TotalVotes": 27, "TotalKernels": 34}]
[{"Id": 7039146, "UserName": "emrullahdemirhan", "DisplayName": "Emrullah Demirhan", "RegisterDate": "03/26/2021", "PerformanceTier": 0}]
false
1
489
0
512
489
129784484
<jupyter_start><jupyter_text>Adult Census Income
This data was extracted from the [1994 Census bureau database][1] by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics). A set of reasonably clean records was extracted using the following conditions: ((AAGE>16) && (AGI>100) && (AFNLWGT>1) && (HRSWK>0)). *The prediction task is to determine whether a person makes over $50K a year*.

## Description of fnlwgt (final weight)

The weights on the Current Population Survey (CPS) files are controlled to independent estimates of the civilian noninstitutional population of the US. These are prepared monthly for us by Population Division here at the Census Bureau. We use 3 sets of controls. These are:

1. A single cell estimate of the population 16+ for each state.
2. Controls for Hispanic Origin by age and sex.
3. Controls by Race, age and sex.

We use all three sets of controls in our weighting program and "rake" through them 6 times so that by the end we come back to all the controls we used. The term estimate refers to population totals derived from CPS by creating "weighted tallies" of any specified socio-economic characteristics of the population. People with similar demographic characteristics should have similar weights. There is one important caveat to remember about this statement. That is that since the CPS sample is actually a collection of 51 state samples, each with its own probability of selection, the statement only applies within state.

## Relevant papers

Ron Kohavi, ["Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid"][2], *Proceedings of the Second International Conference on Knowledge Discovery and Data Mining*, 1996. (PDF)

[1]: http://www.census.gov/en.html
[2]: http://robotics.stanford.edu/~ronnyk/nbtree.pdf
Kaggle dataset identifier: adult-census-income
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, classification_report
import warnings

warnings.filterwarnings("ignore")

data = pd.read_csv("../input/adult-census-income/adult.csv")
data.head()
data.shape
data.columns = [
    "age",
    "workclass",
    "fnlwgt",
    "education",
    "education-num",
    "marital-status",
    "occupation",
    "relationship",
    "race",
    "sex",
    "capital-gain",
    "capital-loss",
    "hours-per-week",
    "country",
    "salary",
]
data.isnull().sum()
data["workclass"].unique()

# "?" entries stand in for missing values; convert them to NaN
data.isin([" ?"]).sum()
data = data.replace(" ?", np.nan)

print("Continuous variables in the dataset are: ")
cont = []
for i in data.columns:
    if data[i].dtype == "int64":
        cont.append(i)
        print(i, end=",")

print("\n\nCategorical variables in the dataset are: ")
catg = []
for i in data.columns:
    if data[i].dtype == "O":
        catg.append(i)
        print(i, end=", ")

data.head()
data.salary.describe()

for i in cont:
    sns.distplot(data[i])
    plt.show()
for i in catg:
    sns.countplot(data[i])
    plt.show()

data["marital-status"].unique()
data.info()
sns.relplot(data=data, x="age", y="capital-loss", hue="marital-status", kind="line")
data.info()

import seaborn as sns

sns.barplot(x=data["education"], y=data["education-num"])

import seaborn as sns

sns.barplot(x=data["sex"], y=data["capital-loss"])

data["education-num"].unique()
sns.boxplot(data["capital-gain"])
plt.show()

# Data sets with high kurtosis tend to have heavy tails, or outliers.
# Data sets with low kurtosis tend to have light tails, or lack of outliers.
stats.kurtosis(data["capital-gain"])
ind = data[(data["capital-gain"] < 10000)].index
stats.kurtosis(data.drop(ind)["capital-gain"])
sns.boxplot(data.drop(ind)["capital-gain"])
data.drop(ind)["capital-gain"].shape

cont = ["age", "education-num", "capital-gain", "capital-loss", "hours-per-week"]
catg

plt.figure(figsize=(10, 7))
grp = data["workclass"].value_counts()
sns.barplot(x=grp.index, y=grp.values)
data["workclass"].unique()
grp.values
data[data["salary"] == "<=50K"]["workclass"].value_counts()
data.info()


def catg_vis(dt, cg):
    """Plot category counts of column `cg` side by side for the two salary groups."""
    plt.figure(figsize=(20, 5))
    a = dt[dt["salary"] == "<=50K"][cg].value_counts()  # Below 50k salary
    b = dt[dt["salary"] == ">50K"][cg].value_counts()  # Above 50k salary
    plt.subplot(1, 2, 1)
    plt.title("Below 50K salary")
    sns.barplot(x=a.index, y=a.values)
    plt.subplot(1, 2, 2)
    plt.title("Above 50K salary")
    sns.barplot(x=b.index, y=b.values)
    plt.show()


for i in catg:
    print(i, ":\n")
    catg_vis(data, i)

catg = ["occupation", "race", "sex"]
data[cont + catg]

# analyzing capital gain and capital loss
data["capital-gain"].describe()
plt.hist(data["capital-gain"])
plt.show()
stats.skew(data["capital-gain"])

cont
# drop "capital-gain" and "capital-loss" from the continuous feature list
for i in range(2):
    cont.pop(2)
cont
print(cont)
print("\n", catg)
data[cont + catg].head()

le = LabelEncoder()
y = data["sex"].values
data["sext"] = le.fit_transform(y)
y = data["occupation"].values
data["occupationt"] = le.fit_transform(y)
y = data["race"].values
data["racet"] = le.fit_transform(y)
data["salaryT"] = le.fit_transform(data["salary"])

sel_cols = ["age", "education-num", "hours-per-week", "occupationt", "racet", "sext"]
data[sel_cols].head()

# Is there any row with both a capital gain and a capital loss?
# (for/else: the message prints only when the loop finds nothing)
for i in range(len(data["capital-gain"])):
    if data["capital-gain"][i] != 0 and data["capital-loss"][i] != 0:
        print(i)
        break
else:
    print("No such entry")

plt.hist(data["capital-gain"])
plt.show()

X_train, X_test, y_train, y_test = train_test_split(
    data[sel_cols], data["salaryT"], test_size=0.33, random_state=42
)
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred, target_names=["<=50K", ">50K"]))

sns.distplot(data["age"])
sns.histplot(data["age"], bins=10)

# First attempt at binning ages: the strict comparisons let the boundary
# ages 31, 41 and 51 slip through unbinned
data["age_slab"] = data["age"]
for i in range(len(data["age_slab"])):
    a = data["age_slab"][i]
    if a < 31:
        data["age_slab"][i] = 1
    elif a > 31 and a < 41:
        data["age_slab"][i] = 2
    elif a > 41 and a < 51:
        data["age_slab"][i] = 3
    elif a > 51 and a < 71:
        data["age_slab"][i] = 4
    elif a >= 71:
        data["age_slab"][i] = 5

# Corrected binning with inclusive lower bounds
data["age_slab"] = data["age"]
for i in range(len(data["age_slab"])):
    a = data["age_slab"][i]
    if a < 31:
        data["age_slab"][i] = 1
    elif a >= 31 and a < 41:
        data["age_slab"][i] = 2
    elif a >= 41 and a < 51:
        data["age_slab"][i] = 3
    elif a >= 51 and a < 71:
        data["age_slab"][i] = 4
    elif a >= 71:
        data["age_slab"][i] = 5

plt.hist(data[data["salary"] == "<=50K"]["age_slab"])
plt.show()
plt.hist(data[data["salary"] == ">50K"]["age_slab"])
plt.show()

X_train, X_test, y_train, y_test = train_test_split(
    data[sel_cols], data["salaryT"], test_size=0.33
)
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred, target_names=["<=50K", ">50K"]))

data[sel_cols].head()

sns.distplot(data[data["salary"] == "<=50K"]["hours-per-week"])
plt.title("<=50K")
plt.show()
sns.distplot(data[data["salary"] == ">50K"]["hours-per-week"])  # filter fixed to match the title
plt.title(">50K")
plt.show()

sns.distplot(data[data["salary"] == "<=50K"]["education-num"])
plt.title("<=50K")
plt.show()
sns.distplot(data[data["salary"] == ">50K"]["education-num"])  # filter fixed to match the title
plt.title(">50K")
plt.show()

print(data.groupby(by=["salary"])["hours-per-week"].mean())
data.groupby(by=["salary"])["hours-per-week"].median()
data.groupby(by=["salary"])["racet"].mean()
data[sel_cols].head()

df = pd.DataFrame(data.groupby(["race"])["salary"].value_counts())
df
data.groupby(["sex"])["salary"].value_counts()
X_train.head()

from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))  # (y_true, y_pred) argument order

from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler

model = SGDClassifier()
scaler = StandardScaler()
v = scaler.fit_transform(X_train)
model.fit(v, y_train)
v2 = scaler.transform(X_test)  # transform only: the scaler is fit on the training split
y_pred = model.predict(v2)
print(classification_report(y_test, y_pred))

from sklearn.linear_model import LogisticRegressionCV

model = LogisticRegressionCV(cv=5)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

from sklearn.naive_bayes import MultinomialNB

model = MultinomialNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
model = KNeighborsClassifier()
model.fit(scaler.fit_transform(X_train), y_train)
y_pred = model.predict(scaler.transform(X_test))
print(classification_report(y_test, y_pred))

from sklearn.svm import SVC

model = SVC()
scaler = StandardScaler()
model.fit(scaler.fit_transform(X_train), y_train)
y_pred = model.predict(scaler.transform(X_test))
print(classification_report(y_test, y_pred))

from sklearn.tree import DecisionTreeClassifier

model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

from sklearn.ensemble import AdaBoostClassifier

model = AdaBoostClassifier(n_estimators=100)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
print("Confusion matrix\n", confusion_matrix(y_test, y_pred))

from sklearn.neural_network import MLPClassifier

model = MLPClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

model = QuadraticDiscriminantAnalysis()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

from sklearn.model_selection import RandomizedSearchCV

n_estimator = [int(i) for i in range(80, 100)]
learning_rate = [int(i) for i in range(-5, 5)]
alg = ["SAMME", "SAMME.R"]
random_grid = {
    "n_estimators": n_estimator,
    "learning_rate": learning_rate,
    "algorithm": alg,
}
print(random_grid)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/784/129784484.ipynb
adult-census-income
null
[{"Id": 129784484, "ScriptId": 29340935, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9224136, "CreationDate": "05/16/2023 12:52:29", "VersionNumber": 1.0, "Title": "Adult Income Classification", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 387.0, "LinesInsertedFromPrevious": 387.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186149635, "KernelVersionId": 129784484, "SourceDatasetVersionId": 498}]
[{"Id": 498, "DatasetId": 225, "DatasourceVersionId": 498, "CreatorUserId": 495305, "LicenseName": "CC0: Public Domain", "CreationDate": "10/07/2016 23:42:59", "VersionNumber": 3.0, "Title": "Adult Census Income", "Slug": "adult-census-income", "Subtitle": "Predict whether income exceeds $50K/yr based on census data", "Description": "This data was extracted from the [1994 Census bureau database][1] by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics). A set of reasonably clean records was extracted using the following conditions: ((AAGE>16) && (AGI>100) && (AFNLWGT>1) && (HRSWK>0)). *The prediction task is to determine whether a person makes over $50K a year*.\n\n## Description of fnlwgt (final weight)\n\nThe weights on the Current Population Survey (CPS) files are controlled to independent estimates of the civilian noninstitutional population of the US. These are prepared monthly for us by Population Division here at the Census Bureau. We use 3 sets of controls. These are: \n\n 1. A single cell estimate of the population 16+ for each state.\n \n 2. Controls for Hispanic Origin by age and sex.\n\n 3. Controls by Race, age and sex.\n\nWe use all three sets of controls in our weighting program and \"rake\" through them 6 times so that by the end we come back to all the controls we used. The term estimate refers to population totals derived from CPS by creating \"weighted tallies\" of any specified socio-economic characteristics of the population. People with similar demographic characteristics should have similar weights. There is one important caveat to remember about this statement. That is that since the CPS sample is actually a collection of 51 state samples, each with its own probability of selection, the statement only applies within state.\n\n##Relevant papers\n\nRon Kohavi, [\"Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid\"][2], *Proceedings of the Second International Conference on Knowledge Discovery and Data Mining*, 1996. (PDF)\n\n [1]: http://www.census.gov/en.html\n [2]: http://robotics.stanford.edu/~ronnyk/nbtree.pdf", "VersionNotes": "Removed leading whitespace everywhere", "TotalCompressedBytes": 4104734.0, "TotalUncompressedBytes": 4104734.0}]
[{"Id": 225, "CreatorUserId": 495305, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 498.0, "CurrentDatasourceVersionId": 498.0, "ForumId": 1649, "Type": 2, "CreationDate": "10/06/2016 17:19:07", "LastActivityDate": "02/05/2018", "TotalViews": 592007, "TotalDownloads": 52622, "TotalVotes": 593, "TotalKernels": 478}]
null
false
0
3,268
0
3,750
3,268
129784022
<jupyter_start><jupyter_text>Dog Emotions Prediction
This dataset is part of a Dataquest project walkthrough. Images were downloaded from Flickr using its API; dog images are classified into four categories based on their emotions:
1- happy
2- sad
3- angry
4- relaxed
Kaggle dataset identifier: dog-emotions-prediction
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import shutil
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator

DIRECTORY = r"/kaggle/input/dog-emotions-prediction/images"
OUT_DIR = r"/kaggle/working/"
CATEGORIES = ["angry", "happy", "relaxed", "sad"]
TRAIN_DIR = os.path.join(OUT_DIR, "train")
TEST_DIR = os.path.join(OUT_DIR, "test")

# Create train and test directories
os.makedirs(TRAIN_DIR, exist_ok=True)
os.makedirs(TEST_DIR, exist_ok=True)

# Copy images into per-category train and test directories
for category in CATEGORIES:
    image_dir = os.path.join(DIRECTORY, category)
    train_category_dir = os.path.join(TRAIN_DIR, category)
    test_category_dir = os.path.join(TEST_DIR, category)
    os.makedirs(train_category_dir, exist_ok=True)
    os.makedirs(test_category_dir, exist_ok=True)
    images = os.listdir(image_dir)
    n_test_images = int(len(images) * 0.2)  # 20% of images go to test
    test_images = images[:n_test_images]
    train_images = images[n_test_images:]
    for image in train_images:
        src = os.path.join(image_dir, image)
        dst = os.path.join(train_category_dir, image)
        shutil.copy(src, dst)
    for image in test_images:
        src = os.path.join(image_dir, image)
        dst = os.path.join(test_category_dir, image)
        shutil.copy(src, dst)

train_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    validation_split=0.2,
    rotation_range=5,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    # zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode="nearest",
)
valid_datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_dataset = train_datagen.flow_from_directory(
    directory="/kaggle/working/train",
    target_size=(224, 224),
    class_mode="categorical",
    subset="training",
    batch_size=64,
)
valid_dataset = valid_datagen.flow_from_directory(
    directory="/kaggle/working/train",
    target_size=(224, 224),
    class_mode="categorical",
    subset="validation",
    batch_size=64,
)
test_dataset = test_datagen.flow_from_directory(
    directory="/kaggle/working/test",
    target_size=(224, 224),
    class_mode="categorical",
    batch_size=64,
)

import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import random
import pickle

# # Applying CNN
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# import skimage.io
import keras.backend as K
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import (
    Dense,
    Flatten,
    Dropout,
    BatchNormalization,
    Activation,
)
from tensorflow.keras.models import Model, Sequential
from keras.applications.nasnet import NASNetLarge
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import Adam
import efficientnet.keras as efn

# base_model = efn.EfficientNetB0(input_shape = (224, 224, 3), include_top = False, weights = 'imagenet')
# base_model = tf.keras.applications.VGG16(input_shape=(48,48,3),include_top=False,weights="imagenet")
base_model = efn.EfficientNetB0(
    include_top=False,
    weights="imagenet",
    input_shape=(224, 224, 3),
    pooling=None,
    classes=4,
)
# Freeze the pretrained backbone; only the new head below is trained
for layer in base_model.layers:
    layer.trainable = False

import tensorflow as tf

# Building Model
model = Sequential()
model.add(base_model)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(32, kernel_initializer="he_uniform"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dense(4, activation="softmax"))
model.summary()


def f1_score(y_true, y_pred):  # taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val


METRICS = [
    tf.keras.metrics.BinaryAccuracy(name="accuracy"),
    tf.keras.metrics.Precision(name="precision"),
    tf.keras.metrics.Recall(name="recall"),
    tf.keras.metrics.AUC(name="auc"),
    f1_score,
]

lrd = ReduceLROnPlateau(
    monitor="val_loss", patience=20, verbose=1, factor=0.50, min_lr=1e-10
)
mcp = ModelCheckpoint("model.h5")
es = EarlyStopping(verbose=1, patience=20)

# model.compile(optimizer='Adam', loss='categorical_crossentropy',metrics=METRICS)
model.compile(optimizer="Adam", loss="categorical_crossentropy", metrics=METRICS)
history = model.fit(
    train_dataset,
    validation_data=valid_dataset,
    epochs=25,
    verbose=1,
    callbacks=[lrd, mcp, es],
)

# %% PLOTTING RESULTS (Train vs Validation FOLDER 1)
def Train_Val_Plot(
    acc, val_acc, loss, val_loss, auc, val_auc, precision, val_precision, f1, val_f1
):
    fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(20, 5))
    fig.suptitle(" MODEL'S METRICS VISUALIZATION ")

    ax1.plot(range(1, len(acc) + 1), acc)
    ax1.plot(range(1, len(val_acc) + 1), val_acc)
    ax1.set_title("History of Accuracy")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("Accuracy")
    ax1.legend(["training", "validation"])

    ax2.plot(range(1, len(loss) + 1), loss)
    ax2.plot(range(1, len(val_loss) + 1), val_loss)
    ax2.set_title("History of Loss")
    ax2.set_xlabel("Epochs")
    ax2.set_ylabel("Loss")
    ax2.legend(["training", "validation"])

    ax3.plot(range(1, len(auc) + 1), auc)
    ax3.plot(range(1, len(val_auc) + 1), val_auc)
    ax3.set_title("History of AUC")
    ax3.set_xlabel("Epochs")
    ax3.set_ylabel("AUC")
    ax3.legend(["training", "validation"])

    ax4.plot(range(1, len(precision) + 1), precision)
    ax4.plot(range(1, len(val_precision) + 1), val_precision)
    ax4.set_title("History of Precision")
    ax4.set_xlabel("Epochs")
    ax4.set_ylabel("Precision")
    ax4.legend(["training", "validation"])

    ax5.plot(range(1, len(f1) + 1), f1)
    ax5.plot(range(1, len(val_f1) + 1), val_f1)
    ax5.set_title("History of F1-score")
    ax5.set_xlabel("Epochs")
    ax5.set_ylabel("F1 score")
    ax5.legend(["training", "validation"])

    plt.show()


Train_Val_Plot(
    history.history["accuracy"],
    history.history["val_accuracy"],
    history.history["loss"],
    history.history["val_loss"],
    history.history["auc"],
    history.history["val_auc"],
    history.history["precision"],
    history.history["val_precision"],
    history.history["f1_score"],
    history.history["val_f1_score"],
)
prediction = model.evaluate(test_dataset)
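One thing worth flagging in the metric setup above: `tf.keras.metrics.BinaryAccuracy` compares each of the four softmax outputs to a 0.5 threshold, which is not the usual notion of accuracy for a 4-class problem. A minimal alternative metric list for the same categorical labels; this is a suggested adjustment under the notebook's existing names (`f1_score` from the script above), not part of the original run.

import tensorflow as tf

METRICS_CATEGORICAL = [
    tf.keras.metrics.CategoricalAccuracy(name="accuracy"),  # argmax match over 4 classes
    tf.keras.metrics.Precision(name="precision"),
    tf.keras.metrics.Recall(name="recall"),
    tf.keras.metrics.AUC(name="auc", multi_label=True, num_labels=4),
    f1_score,  # the custom metric defined in the script
]
# model.compile(optimizer="Adam", loss="categorical_crossentropy", metrics=METRICS_CATEGORICAL)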
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/784/129784022.ipynb
dog-emotions-prediction
devzohaib
[{"Id": 129784022, "ScriptId": 38557692, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8147366, "CreationDate": "05/16/2023 12:48:51", "VersionNumber": 1.0, "Title": "DogEmotionRecognition", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 242.0, "LinesInsertedFromPrevious": 242.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186149163, "KernelVersionId": 129784022, "SourceDatasetVersionId": 4279172}]
[{"Id": 4279172, "DatasetId": 2521291, "DatasourceVersionId": 4336866, "CreatorUserId": 5905558, "LicenseName": "CC0: Public Domain", "CreationDate": "10/03/2022 18:08:51", "VersionNumber": 1.0, "Title": "Dog Emotions Prediction", "Slug": "dog-emotions-prediction", "Subtitle": "figure out what emotion a dog is feeling based on a picture", "Description": "This dataset is part of dataquest project-walkthrough. Images are downloaded from Flickr using API. where Idog images are classified into 4 category based on their emotions. these 4 category are \n1- happy\n2- sad\n3- angry\n4- relaxed", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2521291, "CreatorUserId": 5905558, "OwnerUserId": 5905558.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4279172.0, "CurrentDatasourceVersionId": 4336866.0, "ForumId": 2549837, "Type": 2, "CreationDate": "10/03/2022 18:08:51", "LastActivityDate": "10/03/2022", "TotalViews": 19412, "TotalDownloads": 1530, "TotalVotes": 48, "TotalKernels": 4}]
[{"Id": 5905558, "UserName": "devzohaib", "DisplayName": "devzohaib", "RegisterDate": "10/07/2020", "PerformanceTier": 2}]
false
0
2,478
0
2,564
2,478
129784624
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, AdamW


# Define custom dataset class
class CustomDataset(Dataset):
    def __init__(self, csv_file, tokenizer, max_length):
        self.data = pd.read_csv(csv_file)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        text = self.data.iloc[index]["tweet_text"]
        label = self.data.iloc[index]["cyberbullying_type"]
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        input_ids = encoding["input_ids"].squeeze()
        attention_mask = encoding["attention_mask"].squeeze()
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "cyberbullying_type": torch.tensor(label),
        }


# Load the CSV file
csv_file = "/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv"

# Define hyperparameters
batch_size = 16
max_length = 128
num_epochs = 5
learning_rate = 2e-5

# Load the tokenizer and model
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2
)

# Define the dataset and data loader
dataset = CustomDataset(csv_file, tokenizer, max_length)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Set device (GPU if available, else CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Move model to the device
model = model.to(device)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
criterion = torch.nn.CrossEntropyLoss()

# Training loop
model.train()
for epoch in range(num_epochs):
    running_loss = 0.0
    for batch in dataloader:
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        labels = batch["cyberbullying_type"].to(device)
        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        logits = outputs.logits
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    epoch_loss = running_loss / len(dataloader)
    print(f"Epoch {epoch + 1}/{num_epochs} - Loss: {epoch_loss:.4f}")

# Save the trained model
model.save_pretrained("path_to_save_model")
tokenizer.save_pretrained("path_to_save_tokenizer")

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the trained model and tokenizer
# (the classifier is loaded from its save directory, not from a model object)
tokenizer = BertTokenizer.from_pretrained("path_to_save_tokenizer")
model = BertForSequenceClassification.from_pretrained("path_to_save_model")

# Define the input text
input_text = "Hello, how are you?"

# Tokenize the input text
encoding = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Extract the input IDs and attention mask
input_ids = encoding["input_ids"]
attention_mask = encoding["attention_mask"]

# Perform inference
model.eval()
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits

# Get predicted label
predicted_label = torch.argmax(logits, dim=1).item()

# Print the predicted label
print("Predicted Label:", predicted_label)

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the pretrained model and tokenizer
# (note: this classification head is freshly initialized, so the prediction below is arbitrary)
model_name = "bert-base-multilingual-cased"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define a sample input text
input_text = "This is a sample input text to classify."

# Tokenize the input text
encoded_input = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Move the input tensors to the device
input_ids = encoded_input["input_ids"].to(device)
attention_mask = encoded_input["attention_mask"].to(device)

# Forward pass through the model
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)

# Get the predicted probabilities
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_label = torch.argmax(probs, dim=-1).item()

# Map the predicted label to its corresponding class
label_mapping = {
    0: "Class 0",
    1: "Class 1",
    2: "Class 2",
    # Add more class mappings as needed
}
predicted_class = label_mapping[predicted_label]
print("Predicted class:", predicted_class)

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the tokenizer and the fine-tuned classifier from its save directory
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
model = BertForSequenceClassification.from_pretrained("path_to_save_model")

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define a sample input text
input_text = "This is a sample input text to classify."

# Tokenize the input text
encoded_input = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Move the input tensors to the device
input_ids = encoded_input["input_ids"].to(device)
attention_mask = encoded_input["attention_mask"].to(device)

# Forward pass through the model
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)

# Get the predicted probabilities
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_label = torch.argmax(probs, dim=-1).item()

# Map the predicted label to its corresponding class
label_mapping = {
    0: "Class 0",
    1: "Class 1",
    2: "Class 2",
    # Add more class mappings as needed
}
predicted_class = label_mapping[predicted_label]
print("Predicted class:", predicted_class)

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from torch.utils.data import TensorDataset, DataLoader

# Load the CSV file
data = pd.read_csv("/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv")

# Split the data into training and validation sets
train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)

# Load the tokenizer and encode the texts
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
train_encodings = tokenizer.batch_encode_plus(
    train_data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)
val_encodings = tokenizer.batch_encode_plus(
    val_data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Create TensorDatasets from the encoded inputs
train_dataset = TensorDataset(
    train_encodings["input_ids"],
    train_encodings["attention_mask"],
    torch.tensor(train_data["cyberbullying_type"].tolist()),
)
val_dataset = TensorDataset(
    val_encodings["input_ids"],
    val_encodings["attention_mask"],
    torch.tensor(val_data["cyberbullying_type"].tolist()),
)

# Define the model architecture
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2  # the CSV encodes a binary label
)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the training parameters
batch_size = 16
num_epochs = 5
learning_rate = 2e-5

# Create data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
loss_fn = torch.nn.CrossEntropyLoss()

# Training loop
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        input_ids, attention_mask, labels = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
    avg_train_loss = total_loss / len(train_loader)

    model.eval()
    total_val_loss = 0
    total_val_accuracy = 0
    with torch.no_grad():
        for batch in val_loader:
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            logits = outputs.logits
            total_val_loss += loss.item()
            predictions = torch.argmax(logits, dim=1)
            total_val_accuracy += (predictions == labels).sum().item()
    avg_val_loss = total_val_loss / len(val_loader)
    val_accuracy = total_val_accuracy / len(val_data)
    print(f"Epoch {epoch+1}:")
    print(f"Train Loss: {avg_train_loss:.4f}")
    print(f"Validation Loss: {avg_val_loss:.4f} - Accuracy: {val_accuracy:.4f}")

# save the trained model
output_dir = "./model/"
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)

# Load the pre-trained tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(output_dir)
model = AutoModelForSequenceClassification.from_pretrained(output_dir)
model.to(device)


# Function to perform bullying detection on a text
def detect_bullying(text):
    encoded_input = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded_input["input_ids"].to(device)
    attention_mask = encoded_input["attention_mask"].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1).tolist()[0]
    labels = ["Not Bullying", "Bullying"]
    results = {label: prob for label, prob in zip(labels, probabilities)}
    return results


# Example usage
text = "You are such a loser!"
bullying_result = detect_bullying(text)
print(bullying_result)

import torch
import pandas as pd
from transformers import BertTokenizer, BertForSequenceClassification, AdamW

# Load the CSV file
data = pd.read_csv("/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv")

# Load the tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")

# Tokenize the texts
encodings = tokenizer.batch_encode_plus(
    data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Create the TensorDataset
dataset = torch.utils.data.TensorDataset(
    encodings["input_ids"],
    encodings["attention_mask"],
    torch.tensor(data["cyberbullying_type"].tolist()),
)

# Split the dataset into training and validation sets
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(
    dataset, [train_size, val_size]
)

# Define the model architecture
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2
)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the training parameters
batch_size = 16
num_epochs = 5
learning_rate = 2e-5

# Create the data loaders
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True
)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=True
)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
loss_fn = torch.nn.CrossEntropyLoss()

# Training loop
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        input_ids, attention_mask, labels = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
    avg_train_loss = total_loss / len(train_loader)

    model.eval()
    total_val_loss = 0
    total_val_accuracy = 0
    with torch.no_grad():
        for batch in val_loader:
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            logits = outputs.logits
            total_val_loss += loss.item()
            predictions = torch.argmax(logits, dim=1)
            total_val_accuracy += (predictions == labels).sum().item()
    avg_val_loss = total_val_loss / len(val_loader)
    print(f"Epoch {epoch+1}/{num_epochs}")
    print(f"Train Loss: {avg_train_loss:.4f}")
    print(f"Validation Loss: {avg_val_loss:.4f}")

# Save the trained model
model.save_pretrained("path/to/save/directory")

# Load the trained model
model = BertForSequenceClassification.from_pretrained("path/to/save/directory")
model.to(device)


# Function to perform bullying detection
def detect_bullying(text):
    encoded_input = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded_input["input_ids"].to(device)
    attention_mask = encoded_input["attention_mask"].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1).tolist()[0]
    labels = ["Not Bullying", "Bullying"]
    results = {label: prob for label, prob in zip(labels, probabilities)}
    return results


# Example usage
text = "You are such a loser!"
bullying_result = detect_bullying(text)
print(bullying_result)
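A minimal sketch of the save/load round trip the notebook relies on: the tokenizer and the classifier are saved to, and reloaded from, the same directory via `save_pretrained`/`from_pretrained`. The directory name is a placeholder in the spirit of the paths above, and the final print assumes the `detect_bullying` helper defined in the script.

from transformers import BertTokenizer, BertForSequenceClassification

save_dir = "bullying-bert"  # placeholder directory, analogous to the paths above

# Persist the fine-tuned classifier and its tokenizer side by side ...
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)

# ... and restore both from that same directory later
tokenizer = BertTokenizer.from_pretrained(save_dir)
model = BertForSequenceClassification.from_pretrained(save_dir)

print(detect_bullying("You are such a loser!"))  # reuses the helper defined above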
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/784/129784624.ipynb
null
null
[{"Id": 129784624, "ScriptId": 38507653, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13882750, "CreationDate": "05/16/2023 12:53:28", "VersionNumber": 1.0, "Title": "notebookc1e2ca0f5c", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 513.0, "LinesInsertedFromPrevious": 513.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, AdamW


# Define custom dataset class
class CustomDataset(Dataset):
    def __init__(self, csv_file, tokenizer, max_length):
        self.data = pd.read_csv(csv_file)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        text = self.data.iloc[index]["tweet_text"]
        label = self.data.iloc[index]["cyberbullying_type"]
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        input_ids = encoding["input_ids"].squeeze()
        attention_mask = encoding["attention_mask"].squeeze()
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "cyberbullying_type": torch.tensor(label),
        }


# Load the CSV file
csv_file = "/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv"

# Define hyperparameters
batch_size = 16
max_length = 128
num_epochs = 5
learning_rate = 2e-5

# Load the tokenizer and model
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2
)

# Define the dataset and data loader
dataset = CustomDataset(csv_file, tokenizer, max_length)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Set device (GPU if available, else CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Move model to the device
model = model.to(device)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
criterion = torch.nn.CrossEntropyLoss()

# Training loop
model.train()
for epoch in range(num_epochs):
    running_loss = 0.0
    for batch in dataloader:
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        labels = batch["cyberbullying_type"].to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        logits = outputs.logits
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    epoch_loss = running_loss / len(dataloader)
    print(f"Epoch {epoch + 1}/{num_epochs} - Loss: {epoch_loss:.4f}")

# Save the trained model
model.save_pretrained("path_to_save_model")
tokenizer.save_pretrained("path_to_save_tokenizer")

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the trained model and tokenizer
# (fixed: load the model directly from the saved directory instead of calling
# from_pretrained on a model instance and passing the result as a path)
tokenizer = BertTokenizer.from_pretrained("path_to_save_tokenizer")
model = BertForSequenceClassification.from_pretrained("path_to_save_model")

# Define the input text
input_text = "Hello, how are you?"

# Tokenize the input text
encoding = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Extract the input IDs and attention mask
input_ids = encoding["input_ids"]
attention_mask = encoding["attention_mask"]

# Perform inference
model.eval()
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits

# Get predicted label
predicted_label = torch.argmax(logits, dim=1).item()

# Print the predicted label
print("Predicted Label:", predicted_label)

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the pretrained model and tokenizer
model_name = "bert-base-multilingual-cased"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define a sample input text
input_text = "This is a sample input text to classify."

# Tokenize the input text
encoded_input = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Move the input tensors to the device
input_ids = encoded_input["input_ids"].to(device)
attention_mask = encoded_input["attention_mask"].to(device)

# Forward pass through the model
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)

# Get the predicted probabilities
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_label = torch.argmax(probs, dim=-1).item()

# Map the predicted label to its corresponding class
label_mapping = {
    0: "Class 0",
    1: "Class 1",
    2: "Class 2",
    # Add more class mappings as needed
}
predicted_class = label_mapping[predicted_label]
print("Predicted class:", predicted_class)

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the tokenizer and model architecture
# (fixed: point from_pretrained at the saved model directory rather than at the
# tokenizer's added_tokens.json file)
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
model = BertForSequenceClassification.from_pretrained("path_to_save_model")

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define a sample input text
input_text = "This is a sample input text to classify."

# Tokenize the input text
encoded_input = tokenizer.encode_plus(
    input_text,
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Move the input tensors to the device
input_ids = encoded_input["input_ids"].to(device)
attention_mask = encoded_input["attention_mask"].to(device)

# Forward pass through the model
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)

# Get the predicted probabilities
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
predicted_label = torch.argmax(probs, dim=-1).item()

# Map the predicted label to its corresponding class
label_mapping = {
    0: "Class 0",
    1: "Class 1",
    2: "Class 2",
    # Add more class mappings as needed
}
predicted_class = label_mapping[predicted_label]
print("Predicted class:", predicted_class)

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from torch.utils.data import TensorDataset, DataLoader

# Load the CSV file
data = pd.read_csv("/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv")

# Split the data into training and validation sets
train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)

# Load the tokenizer and encode the texts
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
train_encodings = tokenizer.batch_encode_plus(
    train_data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)
val_encodings = tokenizer.batch_encode_plus(
    val_data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Create TensorDatasets from the encoded inputs
train_dataset = TensorDataset(
    train_encodings["input_ids"],
    train_encodings["attention_mask"],
    torch.tensor(train_data["cyberbullying_type"].tolist()),
)
val_dataset = TensorDataset(
    val_encodings["input_ids"],
    val_encodings["attention_mask"],
    torch.tensor(val_data["cyberbullying_type"].tolist()),
)

# Define the model architecture
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=3
)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the training parameters
batch_size = 16
num_epochs = 5
learning_rate = 2e-5

# Create data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
loss_fn = torch.nn.CrossEntropyLoss()

# Training loop
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        input_ids, attention_mask, labels = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        total_loss += loss.item()
        loss.backward()
        optimizer.step()

    avg_train_loss = total_loss / len(train_loader)

    model.eval()
    total_val_loss = 0
    total_val_accuracy = 0
    with torch.no_grad():
        for batch in val_loader:
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            logits = outputs.logits
            total_val_loss += loss.item()
            predictions = torch.argmax(logits, dim=1)
            total_val_accuracy += (predictions == labels).sum().item()

    avg_val_loss = total_val_loss / len(val_loader)
    val_accuracy = total_val_accuracy / len(val_data)
    print(f"Epoch {epoch+1}:")
    print(f"Train Loss: {avg_train_loss}")
    # report the validation metrics computed above as well
    print(f"Validation Loss: {avg_val_loss}")
    print(f"Validation Accuracy: {val_accuracy}")

# save the trained model
output_dir = "./model/"
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)

# Load the pre-trained tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(output_dir)
model = AutoModelForSequenceClassification.from_pretrained(output_dir)
model.to(device)  # keep the reloaded model on the same device as the inputs


# Function to perform bullying detection on a text
def detect_bullying(text):
    encoded_input = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded_input["input_ids"].to(device)
    attention_mask = encoded_input["attention_mask"].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1).tolist()[0]
    labels = ["Not Bullying", "Bullying"]
    results = {label: prob for label, prob in zip(labels, probabilities)}
    return results


# Example usage
text = "You are such a loser!"
bullying_result = detect_bullying(text)
print(bullying_result)

import torch
import pandas as pd
from transformers import BertTokenizer, BertForSequenceClassification, AdamW

# Load the CSV file
data = pd.read_csv("/kaggle/input/hebrew-dataset-binary/Hebrew_dataset_binary.csv")

# Load the tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")

# Tokenize the texts
encodings = tokenizer.batch_encode_plus(
    data["tweet_text"].tolist(),
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

# Create the TensorDataset
dataset = torch.utils.data.TensorDataset(
    encodings["input_ids"],
    encodings["attention_mask"],
    torch.tensor(data["cyberbullying_type"].tolist()),
)

# Split the dataset into training and validation sets
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(
    dataset, [train_size, val_size]
)

# Define the model architecture
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=2
)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the training parameters
batch_size = 16
num_epochs = 5
learning_rate = 2e-5

# Create the data loaders
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True
)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=True
)

# Define the optimizer and loss function
optimizer = AdamW(model.parameters(), lr=learning_rate)
loss_fn = torch.nn.CrossEntropyLoss()

# Training loop
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        input_ids, attention_mask, labels = batch
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        total_loss += loss.item()
        loss.backward()
        optimizer.step()

    avg_train_loss = total_loss / len(train_loader)

    model.eval()
    total_val_loss = 0
    total_val_accuracy = 0
    with torch.no_grad():
        for batch in val_loader:
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            logits = outputs.logits
            total_val_loss += loss.item()
            predictions = torch.argmax(logits, dim=1)
            total_val_accuracy += (predictions == labels).sum().item()

    avg_val_loss = total_val_loss / len(val_loader)
    print(f"Epoch {epoch+1}/{num_epochs}")
    print(f"Train Loss: {avg_train_loss:.4f}")
    print(f"Validation Loss: {avg_val_loss:.4f}")

# Save the trained model
model.save_pretrained("path/to/save/directory")

# Load the trained model
model = BertForSequenceClassification.from_pretrained("path/to/save/directory")
model.to(device)  # move the reloaded model back to the active device


# Function to perform bullying detection on a text
def detect_bullying(text):
    encoded_input = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded_input["input_ids"].to(device)
    attention_mask = encoded_input["attention_mask"].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1).tolist()[0]
    labels = ["Not Bullying", "Bullying"]
    results = {label: prob for label, prob in zip(labels, probabilities)}
    return results


# Example usage
text = "You are such a loser!"
bullying_result = detect_bullying(text)
print(bullying_result)
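# A minimal follow-up sketch, not part of the original notebook: assuming the
# `model`, `device`, `val_loader`, and binary labels defined above, this collects
# argmax predictions batch by batch and prints per-class precision/recall/F1
# with scikit-learn.
from sklearn.metrics import classification_report

all_preds, all_labels = [], []
model.eval()
with torch.no_grad():
    for input_ids, attention_mask, labels in val_loader:
        logits = model(
            input_ids.to(device), attention_mask=attention_mask.to(device)
        ).logits
        # collect hard predictions and gold labels on the CPU
        all_preds.extend(torch.argmax(logits, dim=1).cpu().tolist())
        all_labels.extend(labels.tolist())

print(
    classification_report(
        all_labels, all_preds, target_names=["Not Bullying", "Bullying"]
    )
)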
<jupyter_start><jupyter_text>Compress 5 percent Coordinates Kaggle dataset identifier: compress-5-percent-coordinates <jupyter_script>import os import rasterio as rs import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import matplotlib.colors as mc import seaborn as sns import folium import branca from sklearn.cluster import KMeans from sklearn.cluster import DBSCAN from sklearn.preprocessing import StandardScaler from mpl_toolkits.basemap import Basemap def get_fileTime(src): return str( src.name.split("/")[len(src.name.split("/")) - 1] .replace(".tif", "") .split("_")[1] ) def ndre_(src): NIR = src.read(5) RedEdge = src.read(4) NDRE = np.divide((NIR - RedEdge), (NIR + RedEdge)) if np.count_nonzero(np.isnan(NDRE)): NDRE[np.isnan(NDRE)] = 0 return NDRE def msavi_(src): NIR = src.read(5) Red = src.read(3) MSAVI = (2 * NIR + 1 - np.sqrt((2 * NIR + 1) ** 2 - 8 * (NIR - Red))) / 2 # replace any negative values with 0 MSAVI[MSAVI < 0] = 0 return MSAVI def gndvi_(src): NIR = src.read(5) Green = src.read(2) GNDVI = (NIR - Green) / (NIR + Green) return GNDVI def ndvi_(src): NIR = src.read(5) Red = src.read(3) NDVI = (NIR - Red) / (NIR + Red) return NDVI weights_may = {"ndvi": 0.303, "ndre": 0.16, "msavi": 0.263, "gndvi": 0.275} weights_june = {"ndvi": 0.262, "ndre": 0.216, "msavi": 0.258, "gndvi": 0.263} weights_july = {"ndvi": 0.257, "ndre": 0.24, "msavi": 0.25, "gndvi": 0.254} weights_month = {"may": -2.069, "june": 1.642, "july": 1.427} def get_weighted(san: str): PATH = "/kaggle/input/compressed-5-percent" folds = [san] for f in folds: vc_may = [] vc_june = [] vc_july = [] files = os.listdir(PATH + "/" + f) for wheat in files: data = rs.open(PATH + "/" + f + "/" + wheat) ndvi = data.read(6) ndre = ndre_(data) msavi = msavi_(data) gndvi = gndvi_(data) if get_fileTime(data).split("-")[0] == "05": vc_may = ( ndvi * weights_may["ndvi"] + ndre * weights_may["ndre"] + msavi * weights_may["msavi"] + gndvi * weights_may["gndvi"] ) if get_fileTime(data).split("-")[0] == "06": new_index_june = ( ndvi * weights_june["ndvi"] + ndre * weights_june["ndre"] + msavi * weights_june["msavi"] + gndvi * weights_june["gndvi"] ) vc_june.append(new_index_june) if get_fileTime(data).split("-")[0] == "07": new_index_july = ( ndvi * weights_july["ndvi"] + ndre * weights_july["ndre"] + msavi * weights_july["msavi"] + gndvi * weights_july["gndvi"] ) vc_july.append(new_index_july) vc_june_ = np.mean(vc_june, axis=0) vc_july_ = np.mean(vc_july, axis=0) weighted_comb = ( vc_may * weights_month["may"] + vc_june_ * weights_month["june"] + vc_july_ * weights_month["july"] ) return weighted_comb def change_cent(labels, high_label, low_label): lab = labels.copy() temp = np.full(lab.shape, -1) if high_label == 2 and low_label == 0: temp = lab if high_label == 2 and low_label == 1: # 1 --> 0 AND 0 --> 1 temp[lab == 1] = 0 temp[lab == 0] = 1 temp[lab == 2] = 2 if high_label == 1 and low_label == 0: # 1 --> 2 AND 2 --> 1 temp[lab == 1] = 2 temp[lab == 2] = 1 temp[lab == 0] = 0 if high_label == 1 and low_label == 2: # 1 --> 2 AND 2 --> 0 AND 0 --> 1 temp[lab == 1] = 2 temp[lab == 2] = 0 temp[lab == 0] = 1 if high_label == 0 and low_label == 1: # 0 --> 2 AND 1 --> 0 AND 2 --> 1 temp[lab == 0] = 2 temp[lab == 1] = 0 temp[lab == 2] = 1 if high_label == 0 and low_label == 2: # 0 --> 2 AND 2 --> 0 AND 0 --> 1 temp[lab == 0] = 2 temp[lab == 2] = 0 temp[lab == 0] = 1 return temp def run(pole: str): coo = pd.read_csv( "/kaggle/input/compress-5-percent-coordinates/" + pole + ".csv", usecols=["x", "y"], ) 
data = {"Long": coo["x"], "Lat": coo["y"], "vi": get_weighted(pole).flatten()} return pd.DataFrame(data=data) def plot_KMEANS(pole: str): df = run(pole) kmeans = KMeans(n_clusters=3, n_init=10, random_state=0).fit(df) centroids = [kmeans.cluster_centers_[i][2] for i in range(0, 3)] high_label = np.argmax(centroids) low_label = np.argmin(centroids) labels = change_cent(kmeans.labels_, high_label, low_label) # Plot cmap = matplotlib.colors.LinearSegmentedColormap.from_list( "", ["#964B00", "yellow", "green"] ) plt.scatter(df["Long"], df["Lat"], c=labels.reshape(-1, 1), cmap=cmap) plt.colorbar(label="Weighted") plt.xlabel("Longitude") plt.ylabel("Latitude") plt.title("KMEANS " + pole) plt.show() folds = ["13", "23", "24", "25", "27"] [plot_KMEANS(f) for f in folds]
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia) ### Context http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5 ![](https://i.imgur.com/jZqpV51.png) Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6 The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs. http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5 ### Content The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care. For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert. Kaggle dataset identifier: chest-xray-pneumonia <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import glob from sklearn.model_selection import train_test_split from PIL import Image import torch from torch.utils.data import Dataset, DataLoader # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory from tqdm.auto import tqdm from torch.optim import Adam import matplotlib.pyplot as plt from torchvision.models import resnet50 from torchvision import transforms import torch.nn as nn import os import random for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames[:5]: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session class config: EPOCHS = 3 BATCH_SIZE = 32 num_workers = 4 lr = 3e-5 model = resnet50 device = "cuda" if torch.cuda.is_available() else "cpu" # ==================================================== # Directory settings # ==================================================== import os OUTPUT_DIR = "./" if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) train_normal = glob.glob("/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL/*") train_pneumonia = glob.glob( "/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA/*" ) test_normal = glob.glob("/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL/*") test_pneumonia = glob.glob( "/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA/*" ) train_normal[:4] train_path = train_normal + train_pneumonia test_path = test_normal + test_pneumonia train_path[2000:2005] train_labels = [0] * len(train_normal) + [1] * len(train_pneumonia) 
print(len(train_labels)) print(len(train_path)) train_path, valid_path, train_labels, valid_labels = train_test_split( train_path, train_labels, stratify=train_labels ) def show_images(): # Assuming you have a list of file paths called train_normal and train_pneumonia random_normal = random.sample(train_normal, 2) random_abnormal = random.sample(train_pneumonia, 2) # Display the random images fig, axes = plt.subplots(2, 2, figsize=(10, 10)) for i, path in enumerate(random_normal): img = Image.open(path).convert("LA") axes[i // 2, i % 2].imshow(img) axes[i // 2, i % 2].set_title("Normal x-ray") axes[i // 2, i % 2].axis("off") for i, path in enumerate(random_abnormal): img = Image.open(path).convert("LA") axes[1 + i // 2, i % 2].imshow(img) axes[1 + i // 2, i % 2].set_title("Abnormal x-ray") axes[1 + i // 2, i % 2].axis("off") plt.tight_layout() plt.show() show_images() class ChestXrayDataset(Dataset): def __init__(self, path, labels, transforms=None): self.path = path self.labels = labels self.transforms = transforms def __len__(self): return len(self.path) def __getitem__(self, index): path = self.path[index] image = Image.open(path).convert("RGB") if self.transforms: image = self.transforms(image) labels = self.labels[index] labels = torch.tensor([labels]) return image, labels class PneumoniaNet(nn.Module): def __init__(self, pretrained=True): super(PneumoniaNet, self).__init__() self.model = resnet50(pretrained=pretrained) self.fc = nn.Linear(in_features=2048, out_features=1) def forward(self, x): x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) x = self.model.layer2(x) x = self.model.layer3(x) x = self.model.layer4(x) x = self.model.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x device = config.device pretrained = True model = PneumoniaNet(pretrained=pretrained) torch.save(config.model, OUTPUT_DIR + "config.pth") model.to(device) image_size = (500, 500) train_transform = transforms.Compose( [ transforms.ToTensor(), transforms.RandomRotation(degrees=15), transforms.Resize(size=image_size), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) test_transform = transforms.Compose( [ transforms.Resize(size=image_size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ] ) train_dataset = ChestXrayDataset(train_path, train_labels, train_transform) valid_dataset = ChestXrayDataset(valid_path, valid_labels, test_transform) train_dataloader = DataLoader( train_dataset, batch_size=config.BATCH_SIZE, num_workers=config.num_workers, shuffle=True, ) valid_dataloader = DataLoader( valid_dataset, batch_size=config.BATCH_SIZE, num_workers=config.num_workers, shuffle=False, ) criterion = nn.BCEWithLogitsLoss() optimizer = torch.optim.Adam(model.parameters(), config.lr) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3) dataloaders = {"train": train_dataloader, "val": valid_dataloader} logging_steps = { "train": len(dataloaders["train"]) // 10, "val": len(dataloaders["val"]) // 10, } dataset_sizes = {"train": len(train_dataset), "val": len(valid_dataset)} batch_sizes = {"train": config.BATCH_SIZE, "val": config.BATCH_SIZE} def train_model(model, criterion, optimizer, epochs, device): # best_model = copy.deepcopy(model.state_dict()) best_acc = 0 train_losses = [] train_accs = [] val_losses = [] val_accs = [] for epoch in tqdm(range(epochs)): for phase in ["train", "val"]: if phase == "train": model.train() else: model.eval() 
running_loss = 0 correct = 0 for i, (inputs, labels) in tqdm(enumerate(dataloaders[phase])): inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() # Forward pass with torch.set_grad_enabled(phase == "train"): outputs = model(inputs) preds = outputs.sigmoid() > 0.5 loss = criterion(outputs, labels.float()) if phase == "train": loss.backward() optimizer.step() running_loss += loss.item() * inputs.size(0) correct += torch.sum(preds == labels.data) if (i % logging_steps[phase] == 0) and (i > 0): avg_loss = running_loss / dataset_sizes[phase] avg_acc = correct.double() / dataset_sizes[phase] print( f"[{phase}]: {epoch+1} / {epochs} | loss: {avg_loss} | acc: {avg_acc}" ) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = correct.double() / dataset_sizes[phase] print("{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc)) if phase == "val" and epoch_acc > best_acc: best_acc = epoch_acc torch.save(config.model, OUTPUT_DIR + "config.pth") if phase == "train": train_losses.append(epoch_loss) train_accs.append(epoch_acc) else: val_losses.append(epoch_loss) val_accs.append(epoch_acc) # model.load_state_dict(best_model) return model, train_losses, train_accs, val_losses, val_accs train_model(model, criterion, optimizer, config.EPOCHS, device) # Test Data # Training on test data test_path = glob.glob( "/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL/*" ) + glob.glob("/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA/*") test_labels = [0] * len( glob.glob("/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL/*") ) + [1] * len( glob.glob("/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA/*") ) print(len(test_path), len(test_labels)) test_dataset = ChestXrayDataset(test_path, test_labels, test_transform) test_dataloader = DataLoader(test_dataset, batch_size=config.BATCH_SIZE, shuffle=False) y_pred = [] y_true = [] for i, (inputs, labels) in tqdm( enumerate(test_dataloader), leave=False, total=len(test_dataloader) ): with torch.no_grad(): predictions = model(inputs.cuda()) predictions = predictions.sigmoid() predictions = predictions > 0.5 y_pred.append(predictions) y_true.append(labels) y_pred = torch.cat(y_pred) y_true = torch.cat(y_true) y_pred = y_pred.cpu().numpy() y_true = y_true.numpy() y_pred = y_pred.astype(np.int64) y_true = y_true.astype(np.int64) y_pred = y_pred.reshape(-1) y_true = y_true.reshape(-1) from sklearn.metrics import accuracy_score accuracy_score(y_true, y_pred)
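# A short follow-up sketch, not part of the original notebook: given the
# `y_true` and `y_pred` arrays built above, the confusion matrix and per-class
# report are usually more informative than plain accuracy on this imbalanced
# test split.
from sklearn.metrics import confusion_matrix, classification_report

print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred, target_names=["NORMAL", "PNEUMONIA"]))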
<jupyter_start><jupyter_text>Air Passengers # Context Air Passengers per month. Workshop dataset Kaggle dataset identifier: air-passengers <jupyter_script>import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.pylab import rcParams from datetime import datetime from statsmodels.tsa.stattools import adfuller, acf, pacf from statsmodels.tsa.seasonal import seasonal_decompose from statsmodels.tsa.arima.model import ARIMA rcParams["figure.figsize"] = 10, 6 df = pd.read_csv("/kaggle/input/air-passengers/AirPassengers.csv") df["Month"] = pd.to_datetime(df["Month"], infer_datetime_format=True) idf = df.set_index(["Month"]) idf.head() plt.xlabel("date") plt.ylabel("passengers") plt.plot(idf) def test_stationary(ts): ts.dropna(inplace=True) ma = ts.rolling(window=12).mean() ms = ts.rolling(window=12).std() org = plt.plot(ts, label="original") mean = plt.plot(ma, color="r", label="rolling mean") std = plt.plot(ms, color="g", label="rolling std") plt.legend(loc="best") plt.show() print("Dickey Fuller test :") dft = adfuller(ts["#Passengers"], autolag="AIC") dfto = pd.Series( dft[0:4], index=["test statistic", "p value", "lags used", "observations"] ) for k, v in dft[4].items(): dfto["critical value (%s)" % k] = v print(dfto) test_stationary(idf) # p value ~ 1 # therefore the timeseries is not stationary # ### Stationary transformation # stationary transformation # applying log values idfl = np.log(idf) test_stationary(idfl) # #### Method 1 # Subtract moving average from log values lrmean = idfl.rolling(window=12).mean() lrstd = idfl.rolling(window=12).std() dfx = idfl - lrmean test_stationary(dfx) # #### Method 2 # Exponential decay edma = idfl.ewm(halflife=12, min_periods=0, adjust=True).mean() eddf = idfl - edma test_stationary(eddf) # #### Method 3 # Time shift transformation: dfts = idfl - idfl.shift() test_stationary(dfts) # ### Trend, Season, Residual d = seasonal_decompose(idfl) t = d.trend s = d.seasonal r = d.resid plt.plot(idfl, label="original") plt.plot(t, color="r", label="trend") plt.plot(s, color="g", label="seasonality") plt.plot(r, color="y", label="residual") plt.legend(loc="best") r = pd.DataFrame(r) r["#Passengers"] = r["resid"] r.drop(["resid"], axis=1, inplace=True) test_stationary(r) # ### ACF and PACF lacf = acf(dfts, nlags=20) lpacf = pacf(dfts, nlags=20, method="ols") # plot ACF plt.subplot(121) plt.plot(lacf) plt.axhline(y=0, linestyle="--", color="gray") plt.axhline(y=-1.96 / np.sqrt(len(dfts)), linestyle="--", color="gray") plt.axhline(y=1.96 / np.sqrt(len(dfts)), linestyle="--", color="gray") plt.title("ACF") plt.subplot(122) plt.plot(lpacf) plt.axhline(y=0, linestyle="--", color="gray") plt.axhline(y=-1.96 / np.sqrt(len(dfts)), linestyle="--", color="gray") plt.axhline(y=1.96 / np.sqrt(len(dfts)), linestyle="--", color="gray") plt.title("PACF") plt.show() # ### ARIMA model = ARIMA(dfts, order=(2, 1, 0), trend_offset=-1, freq=None) result = model.fit(transformed=False) plt.plot(dfts) plt.plot(result.fittedvalues, color="r") model = ARIMA(dfts, order=(0, 1, 2), trend_offset=-1, freq=None) result = model.fit(transformed=False) plt.plot(dfts) plt.plot(result.fittedvalues, color="r") model = ARIMA(dfts, order=(2, 1, 2), trend_offset=-1, freq=None) result = model.fit(transformed=False) plt.plot(dfts) plt.plot(result.fittedvalues, color="r")
<jupyter_start><jupyter_text>GlobalLandTemperaturesByMajorCity Kaggle dataset identifier: globallandtemperaturesbymajorcity <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import re # Đọc file csv df = pd.read_csv( "/kaggle/input/globallandtemperaturesbymajorcity/GlobalLandTemperaturesByMajorCity.csv" ) df = df.dropna() df.head() # Chuyển các vĩ độ và kinh độ sang dạng số df["Latitude"] = [ x.replace("N", "") if "N" in x else "-" + x.replace("S", "") for x in df["Latitude"] ] df["Longitude"] = [ x.replace("E", "") if "E" in x else "-" + x.replace("W", "") for x in df["Longitude"] ] df["Latitude"] = df["Latitude"].astype(float) df["Longitude"] = df["Longitude"].astype(float) # Chuyển kiểu dữ liệu cột dt sang dạng datetime và thêm cột Year,Month lấy năm theo cột dt df["dt"] = pd.to_datetime(df.dt, format="%Y-%m-%d") df["Year"] = df["dt"].dt.year df["Month"] = df["dt"].dt.month # # Câu 1: Biểu diễn nhiệt độ trung bình của các thành phố qua thời gian. # ### 1.1 Biểu diễn nhiệt độ trung bình của các thành phố (trung bình nhiệt độ của mọi năm) # Lấy ra nhiệt độ trung bình của các thành phố lớn tại các nước trên thé giới. average_temp_city_df = ( df.groupby(["City", "Longitude", "Latitude"])["AverageTemperature"] .mean() .reset_index() ) # Vẽ biểu đồ # import plotly.graph_objects as go fig = px.density_mapbox( average_temp_city_df, lat="Latitude", lon="Longitude", z="AverageTemperature", hover_name="City", height=500, zoom=2, ) fig.update_layout(mapbox_style="stamen-terrain", mapbox_center_lon=180) fig.update_layout( margin={"r": 0, "t": 50, "l": 0, "b": 0}, title="Biểu diễn nhiệt độ trung bình của các thành phố lớn của tất cả các nước", title_font_size=18, ) fig.show() # Nhận xét: # - Thành phố có nhiệt độ trung bình cao thường tập trung ở châu Á đặc biệt là ở Nam Á và một số thành phó ở châu Phi. # - Thành phố có nhiệt độ trung bình thấp tập trung ở vùng Bắc Mỹ và một số nước tại châu Âu. # ### 1.2 Biểu diễn nhiệt độ trung bình của các thành phố qua từng năm average_temp_city_df = ( df.groupby(["City", "Year", "Longitude", "Latitude"])["AverageTemperature"] .mean() .reset_index() ) # Sắp xếp lại thứ tự tăng dần của cột Year average_temp_city_df = average_temp_city_df.sort_values(by=["Year"]) # Vẽ biểu đồ fig = px.density_mapbox( average_temp_city_df, lat="Latitude", lon="Longitude", z="AverageTemperature", hover_name="City", animation_frame="Year", mapbox_style="carto-positron", height=700, zoom=2, ) fig.update_layout( margin={"r": 0, "t": 50, "l": 0, "b": 0}, title="Biểu diễn nhiệt độ trung bình của các thành phố lớn qua từng năm", title_font_size=18, ) fig.show() # Nhận xét: # - Ở thế kỉ 18 có lẽ dữ liệu không được cập nhật nên ta chỉ có thể thấy nhiệt độ trung bình của các nước châu Âu và Bắc Mỹ. # - Từ giữa thế kỉ 19 trở đi ta thấy xuất hiện nhiều nước hơn và kể từ đó ta thấy không có sự thay đổi gì nhiều về nhiệt độ trung bình hàng năm. # ### 1.3 Biểu diễn nhiệt độ trung bình của thành phố ở Việt Nam qua từng năm # Lấy dataframe của VN vietnam_df = df[df["Country"] == "Vietnam"] vietnam_df["City"].unique() # Vậy dữ liệu về Việt Nam chỉ có thành phố Hồ Chí Minh. 
# Ta sẽ biểu diễn nhiệt độ tại TP.HCM qua từng năm sns.lineplot( data=vietnam_df, x="Year", y="AverageTemperature", hue="City", estimator="mean" ) plt.legend(loc="lower right", title="City") plt.title("Biểu diễn nhiệt độ trung bình của TP.HCM qua từng năm") # Nhiệt độ tại TP.HCM tăng dần # # Câu 2: Biểu diễn nhiệt độ trung bình của các nước qua thời gian. # ### 2.1 Biểu diễn nhiệt độ trung bình của các nước (trung bình nhiệt độ của mọi năm) average_temp_country_df = ( df.groupby(["Country", "Longitude", "Latitude"])["AverageTemperature"] .mean() .reset_index() ) fig = px.density_mapbox( average_temp_country_df, lat="Latitude", lon="Longitude", z="AverageTemperature", hover_name="Country", height=500, zoom=2, ) fig.update_layout(mapbox_style="stamen-terrain", mapbox_center_lon=180) fig.update_layout( margin={"r": 0, "t": 50, "l": 0, "b": 0}, title="Biểu diễn nhiệt độ trung bình của các nước", title_font_size=18, ) fig.show() # ### 2.2 Biểu diễn nhiệt độ trung bình của các nước qua từng năm average_temp_country_df = ( df.groupby(["Country", "Year", "Longitude", "Latitude"])["AverageTemperature"] .mean() .reset_index() ) average_temp_country_df = average_temp_country_df.sort_values(by=["Year"]) fig = px.density_mapbox( average_temp_country_df, lat="Latitude", lon="Longitude", z="AverageTemperature", hover_name="Country", animation_frame="Year", mapbox_style="carto-positron", height=700, zoom=2, ) fig.update_layout( margin={"r": 0, "t": 50, "l": 0, "b": 0}, title="Biểu diễn nhiệt độ trung bình của các nước qua từng năm", title_font_size=18, ) fig.show()
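# A small follow-on sketch, not in the original notebook: a least-squares line
# through the yearly means quantifies the warming trend for Ho Chi Minh City
# seen in the plot above (degrees Celsius per year).
hcm_yearly = vietnam_df.groupby("Year")["AverageTemperature"].mean().dropna()
slope, intercept = np.polyfit(hcm_yearly.index, hcm_yearly.values, 1)
print(f"Estimated trend: {slope:.4f} °C per year")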
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/257/129257164.ipynb
globallandtemperaturesbymajorcity
manukumar16
[{"Id": 129257164, "ScriptId": 38138999, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11617937, "CreationDate": "05/12/2023 08:29:04", "VersionNumber": 2.0, "Title": "lab2_TQHDL", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 145.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 74.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185140859, "KernelVersionId": 129257164, "SourceDatasetVersionId": 4825233}]
[{"Id": 4825233, "DatasetId": 2795334, "DatasourceVersionId": 4888926, "CreatorUserId": 13195050, "LicenseName": "Unknown", "CreationDate": "01/08/2023 12:28:24", "VersionNumber": 1.0, "Title": "GlobalLandTemperaturesByMajorCity", "Slug": "globallandtemperaturesbymajorcity", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2795334, "CreatorUserId": 13195050, "OwnerUserId": 13195050.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4825233.0, "CurrentDatasourceVersionId": 4888926.0, "ForumId": 2829559, "Type": 2, "CreationDate": "01/08/2023 12:28:24", "LastActivityDate": "01/08/2023", "TotalViews": 95, "TotalDownloads": 14, "TotalVotes": 0, "TotalKernels": 2}]
[{"Id": 13195050, "UserName": "manukumar16", "DisplayName": "Manu Kumar 16", "RegisterDate": "01/08/2023", "PerformanceTier": 0}]
false
1
2,054
0
2,082
2,054
129257601
<jupyter_start><jupyter_text>Income classification
Listing of attributes:

>50K, <=50K.

age: continuous.
workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
fnlwgt: continuous.
education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
education-num: continuous.
marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
sex: Female, Male.
capital-gain: continuous.
capital-loss: continuous.
hours-per-week: continuous.
native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
Kaggle dataset identifier: income-classification
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.read_csv("/kaggle/input/income-classification/income_evaluation.csv")

# **Cleaning Data**
df.head()
df.shape
df.info()
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
df.shape
# Strip stray whitespace from the column names, then remove internal spaces
df.columns = df.columns.str.strip()
df.columns = df.columns.str.replace(" ", "")
df.columns

# **Data Insights**
t1 = df["income"].value_counts()
plt.pie(t1.values, labels=t1.index, autopct="%1.1f%%")
plt.title("Income earned by individual")
plt.show()

# Workclass Analysis
df["workclass"].unique()
df["workclass"] = df["workclass"].replace(" ?", "other")
t2 = df["workclass"].value_counts()
plt.barh(t2.index, t2.values, log=True)
plt.title("Workclass")
plt.show()
wc = df.groupby("workclass")
wc["hours-per-week"].mean().plot(kind="bar", title="Avg Hours per Week by Workclass")

# Gender Analysis
G = df.groupby("sex")
t3 = df["sex"].value_counts().reset_index()
plt.bar(t3["index"], t3["sex"])
plt.title("Sex Ratio")
t4 = G["hours-per-week"].mean().reset_index()
plt.bar(t4["sex"], t4["hours-per-week"])
plt.title("Average hours-per-week by sex")
SI = df.groupby(["sex", "income"])["hours-per-week"].count().reset_index()
temp1 = SI[0:2]
temp2 = SI[2:]
plt.subplot(1, 2, 1)
plt.pie(temp1["hours-per-week"], labels=temp1["income"], autopct="%1.1f%%")
plt.title("Female")
plt.subplot(1, 2, 2)
plt.pie(temp2["hours-per-week"], labels=temp2["income"], autopct="%1.1f%%")
plt.title("Male")
plt.suptitle("Gender-wise Income")
plt.show()

df["marital-status"].unique()
# Collapse the married variants into a single "Married" category
df["marital-status"] = df["marital-status"].replace(
    {
        " Married-civ-spouse": "Married",
        " Married-spouse-absent": "Married",
        " Married-AF-spouse": "Married",
    }
)
m = df[df["sex"] == " Male"]
f = df[df["sex"] == " Female"]
t1 = m.groupby("marital-status")["sex"].count()
t2 = f.groupby("marital-status")["sex"].count()
plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
plt.pie(t1.values, labels=t1.index, autopct="%1.1f%%")
plt.title("Male")
plt.subplot(1, 2, 2)
plt.pie(t2.values, labels=t2.index, autopct="%1.1f%%")
plt.title("Female")
plt.suptitle("Marital Status")

plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.hist(m["age"])
plt.xlabel("age")
plt.ylabel("Freq")
plt.title("Male age distribution")
plt.subplot(1, 2, 2)
plt.hist(f["age"])
plt.xlabel("age")
plt.ylabel("Freq")
plt.title("Female age distribution")

# Occupation Analysis
df["occupation"].unique()
df["occupation"] = df["occupation"].replace(" ?", "other")
oc = df.groupby("occupation")
oc["income"].count().plot(kind="bar", title="Occupation count")
oc["hours-per-week"].mean().plot(kind="barh", title="Avg Hours per Week by occupation")
a1 = df.groupby(["income", "occupation"])
mask1 = a1["age"].count().reset_index()
mask1.columns = mask1.columns.str.replace("age", "Number")
table = pd.pivot_table(mask1, index=["occupation"], columns=["income"], aggfunc="sum")
table.plot(
    kind="bar",
    ylabel="Number of People",
    title="Number of people with income above and below 50K by occupation",
)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/257/129257601.ipynb
income-classification
lodetomasi1995
[{"Id": 129257601, "ScriptId": 38427900, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10738953, "CreationDate": "05/12/2023 08:33:10", "VersionNumber": 1.0, "Title": "notebooke7ca8e25d0", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 173.0, "LinesInsertedFromPrevious": 173.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185141727, "KernelVersionId": 129257601, "SourceDatasetVersionId": 346098}]
[{"Id": 346098, "DatasetId": 149550, "DatasourceVersionId": 360062, "CreatorUserId": 2750279, "LicenseName": "Unknown", "CreationDate": "03/26/2019 13:14:12", "VersionNumber": 1.0, "Title": "Income classification", "Slug": "income-classification", "Subtitle": "Prediction task is to determine whether a person makes over 50K a year.", "Description": "Listing of attributes: \n\n&gt;50K, &lt;=50K. \n\nage: continuous. \nworkclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked. \nfnlwgt: continuous. \neducation: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool. \neducation-num: continuous. \nmarital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse. \noccupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces. \nrelationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. \nrace: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black. \nsex: Female, Male. \ncapital-gain: continuous. \ncapital-loss: continuous. \nhours-per-week: continuous. \nnative-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.", "VersionNotes": "Initial release", "TotalCompressedBytes": 3811669.0, "TotalUncompressedBytes": 434770.0}]
[{"Id": 149550, "CreatorUserId": 2750279, "OwnerUserId": 2750279.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 346098.0, "CurrentDatasourceVersionId": 360062.0, "ForumId": 159922, "Type": 2, "CreationDate": "03/26/2019 13:14:12", "LastActivityDate": "03/26/2019", "TotalViews": 107727, "TotalDownloads": 15151, "TotalVotes": 175, "TotalKernels": 60}]
[{"Id": 2750279, "UserName": "lodetomasi1995", "DisplayName": "Lorenzo De Tomasi", "RegisterDate": "01/27/2019", "PerformanceTier": 2}]
false
1
1,358
0
1,944
1,358
129257086
<jupyter_start><jupyter_text>Malicious URLs dataset
### Context
Malicious URLs or malicious websites are a very serious threat to cybersecurity. Malicious URLs host unsolicited content (spam, phishing, drive-by downloads, etc.) and lure unsuspecting users into becoming victims of scams (monetary loss, theft of private information, and malware installation), causing losses of billions of dollars every year. We have collected this dataset to include a large number of examples of malicious URLs so that a machine learning-based model can be developed to identify malicious URLs and stop them in advance, before they infect computer systems or spread through the internet.

### Content
We have collected a huge dataset of 651,191 URLs, out of which 428103 are benign or safe URLs, 96457 are defacement URLs, 94111 are phishing URLs, and 32520 are malware URLs. Figure 2 depicts their distribution in terms of percentage. As we know, one of the most crucial tasks is to curate the dataset for a machine learning project. We have curated this dataset from five different sources.

For collecting benign, phishing, malware and defacement URLs we have used the [URL dataset (ISCX-URL-2016)](https://www.unb.ca/cic/datasets/url-2016.html). For increasing phishing and malware URLs, we have used the [Malware domain black list dataset](http://www.malwaredomains.com/wordpress/?page_id=66). We have increased benign URLs using the [faizan git repo](https://github.com/faizann24/Using-machine-learning-to-detect-malicious-URLs/tree/master/data). At last, we have increased the number of phishing URLs using the [Phishtank dataset](https://www.phishtank.com/developer_info.php) and the [PhishStorm dataset](https://research.aalto.fi/en/datasets/phishstorm--phishing--legitimate-url-dataset(f49465b2-c68a-4182-9171-075f0ed797d5).html). As the dataset is collected from different sources, we first collected the URLs from each source into a separate data frame and finally merged them, retaining only the URLs and their class type.
Kaggle dataset identifier: malicious-urls-dataset
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# # Malicious URL Detection - Part 1
# # Load Dataset
import pandas as pd

a = pd.read_csv("/kaggle/input/malicious-urls-dataset/malicious_phish.csv")
a.head()

# # Data Preprocessing
# Install library for tokenization of URLs
# Select partial data: the full data requires more than 12 hrs of runtime, which exceeds Kaggle's free-user time limit
a = a[:300000]
# Remove duplicates, if any, in the dataset
a = a.drop_duplicates()

# Extract features from the dataset
from url_parser import get_url
import chardet

# Extract Protocol
a["protocol"] = ""


def find_protocol(x):
    try:
        return get_url(x).protocol
    except Exception:
        pass


a["protocol"] = a["url"].apply(lambda x: find_protocol(x))

# Extract www
a["www"] = ""


def find_www(x):
    try:
        return get_url(x).www
    except Exception:
        pass


a["www"] = a["url"].apply(lambda x: find_www(x))

# Extract Sub-Domain
a["sub_domain"] = ""


def find_sub_domain(x):
    try:
        return get_url(x).sub_domain
    except Exception:
        pass


a["sub_domain"] = a["url"].apply(lambda x: find_sub_domain(x))

# Extract Domain name
a["domain"] = ""


def find_domain(x):
    try:
        return get_url(x).domain
    except Exception:
        pass


a["domain"] = a["url"].apply(lambda x: find_domain(x))

# Extract Top-Level Domain name
a["top_domain"] = ""


def find_top_domain(x):
    try:
        return get_url(x).top_domain
    except Exception:
        pass


a["top_domain"] = a["url"].apply(lambda x: find_top_domain(x))

# Extract Directory
a["dir"] = ""


def find_dir(x):
    try:
        return get_url(x).dir
    except Exception:
        pass


a["dir"] = a["url"].apply(lambda x: find_dir(x))

# Extract File name
a["file"] = ""


def find_file(x):
    try:
        return get_url(x).file
    except Exception:
        pass


a["file"] = a["url"].apply(lambda x: find_file(x))

# Extract Path
a["path"] = ""


def find_path(x):
    try:
        return get_url(x).path
    except Exception:
        pass


a["path"] = a["url"].apply(lambda x: find_path(x))

# Extract Fragment
a["fragment"] = ""


def find_fragment(x):
    try:
        return get_url(x).fragment
    except Exception:
        pass


a["fragment"] = a["url"].apply(lambda x: find_fragment(x))

# Extract Query
a["query"] = ""


def find_query(x):
    try:
        return get_url(x).query
    except Exception:
        pass


a["query"] = a["url"].apply(lambda x: find_query(x))

# Count occurrences of / in the path
a["count/"] = 0


def count_forwardslash(x):
    try:
        return x.count("/")
    except Exception:
        return 0


a["count/"] = a["path"].apply(lambda x: count_forwardslash(x))

# Count occurrences of % in the path
a["count%"] = ""


def count_percent(x):
    try:
        return x.count("%")
    except Exception:
        return 0


a["count%"] = a["path"].apply(lambda x: count_percent(x))

# Count occurrences of = in the path
a["count="] = ""


def count_equals(x):
    try:
        return x.count("=")
    except Exception:
        return 0


a["count="] = a["path"].apply(lambda x: count_equals(x))

# Count occurrences of - in the URL
a["count-"] = ""


def count_hyphen(x):
    try:
        return x.count("-")
    except Exception:
        return 0


a["count-"] = a["url"].apply(lambda x: count_hyphen(x))

# Count occurrences of @ in the URL. It may also signify the presence of an email address in the URL
a["count@"] = ""


def count_attherate(x):
    try:
        return x.count("@")
    except Exception:
        return 0


a["count@"] = a["url"].apply(lambda x: count_attherate(x))

# Extract the length of the path in the URL
a["url_path_len"] = 0


def count_url_path_len(x):
    try:
        return len(x)
    except Exception:
        return 0


a["url_path_len"] = a["path"].apply(lambda x: count_url_path_len(x))

# Extract the length of the URL
a["url_len"] = 0


def count_url_len(x):
    try:
        return len(x)
    except Exception:
        return 0


a["url_len"] = a["url"].apply(lambda x: count_url_len(x))

# Extract the length of the file name in the URL
a["file_name_len"] = 0


def count_file_name_len(x):
    try:
        return len(x)
    except Exception:
        return 0


a["file_name_len"] = a["file"].apply(lambda x: count_file_name_len(x))

# Data type conversions for suitable processing in the ML pipeline
from sklearn import preprocessing

label_encoder = preprocessing.LabelEncoder()
a["type"] = label_encoder.fit_transform(a["type"])
a["protocol"] = label_encoder.fit_transform(a["protocol"])
a["sub_domain"] = label_encoder.fit_transform(a["sub_domain"])
a["domain"] = label_encoder.fit_transform(a["domain"])
a["top_domain"] = label_encoder.fit_transform(a["top_domain"])
a["www"] = label_encoder.fit_transform(a["www"])
a["count/"] = a["count/"].astype("int64")
a["count%"] = a["count%"].astype("int64")
a["count="] = a["count="].astype("int64")
a["count-"] = a["count-"].astype("int64")
a["count@"] = a["count@"].astype("int64")
a["www"] = a["www"].astype("int64")
a.info()
a.describe()
a

# Select X and y variables
x = a[
    [
        "protocol",
        "www",
        "sub_domain",
        "domain",
        "top_domain",
        "count/",
        "count%",
        "count=",
        "count-",
        "count@",
        "url_len",
        "url_path_len",
        "file_name_len",
    ]
]
y = a[["type"]]

# Split the overall data into training and testing sets
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y)

# # Machine Learning Algorithms
# Implement Logistic Regression classification
from sklearn.linear_model import LogisticRegression

# Fit on the training split only, so the test score is not inflated by data leakage
clf = LogisticRegression().fit(x_train, y_train)
clf.score(x_test, y_test)

# Implement Decision Tree classifier
from sklearn import tree

clf1 = tree.DecisionTreeClassifier().fit(x_train, y_train)
clf1.score(x_test, y_test)

# Implement XGBoost classifier
import xgboost as xgb

train_reg = xgb.DMatrix(x_train, y_train, enable_categorical=True)
test_reg = xgb.DMatrix(x_test, y_test, enable_categorical=True)
clf2 = xgb.cv(
    params={"objective": "multi:softprob", "num_class": "4"},
    dtrain=train_reg,
    nfold=5,
    metrics=["mlogloss", "auc", "merror"],
)
clf2["test-auc-mean"].max()
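
# The per-field extractor functions above are identical apart from the attribute they
# read; as a hedged refactor sketch, a small factory removes that repetition without
# changing behaviour (assumes the same `get_url` parser and dataframe `a` as above).
def make_extractor(attr):
    def extract(url):
        try:
            return getattr(get_url(url), attr)
        except Exception:
            return None

    return extract


for field in [
    "protocol", "www", "sub_domain", "domain", "top_domain",
    "dir", "file", "path", "fragment", "query",
]:
    a[field] = a["url"].apply(make_extractor(field))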
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/257/129257086.ipynb
malicious-urls-dataset
sid321axn
[{"Id": 129257086, "ScriptId": 38393968, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10403921, "CreationDate": "05/12/2023 08:28:24", "VersionNumber": 2.0, "Title": "MaliciousURLDetectionSmall", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 290.0, "LinesInsertedFromPrevious": 118.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 172.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185140734, "KernelVersionId": 129257086, "SourceDatasetVersionId": 2456026}]
[{"Id": 2456026, "DatasetId": 1486586, "DatasourceVersionId": 2498424, "CreatorUserId": 2048048, "LicenseName": "CC0: Public Domain", "CreationDate": "07/23/2021 18:03:30", "VersionNumber": 1.0, "Title": "Malicious URLs dataset", "Slug": "malicious-urls-dataset", "Subtitle": "Huge dataset of 6,51,191 Malicious URLs", "Description": "### Context\n\nMalicious URLs or malicious website is a very serious threat to cybersecurity. Malicious URLs host unsolicited content (spam, phishing, drive-by downloads, etc.) and lure unsuspecting users to become victims of scams (monetary loss, theft of private information, and malware installation), and cause losses of billions of dollars every year. We have collected this dataset to include a large number of examples of Malicious URLs so that a machine learning-based model can be developed to identify malicious urls so that we can stop them in advance before infecting computer system or spreading through inteinternet.\n\n\n### Content\n\nwe have collected a huge dataset of 651,191 URLs, out of which 428103 benign or safe URLs, 96457 defacement URLs, 94111 phishing URLs, and 32520 malware URLs. Figure 2 depicts their distribution in terms of percentage. As we know one of the most crucial tasks is to curate the dataset for a machine learning project. We have curated this dataset from five different sources.\n\nFor collecting benign, phishing, malware and defacement URLs we have used [URL dataset (ISCX-URL-2016)](https://www.unb.ca/cic/datasets/url-2016.html) For increasing phishing and malware URLs, we have used [Malware domain black list dataset](http://www.malwaredomains.com/wordpress/?page_id=66). We have increased benign URLs using [faizan git repo](https://github.com/faizann24/Using-machine-learning-to-detect-malicious-URLs/tree/master/data) At last, we have increased more number of phishing URLs using [Phishtank dataset](https://www.phishtank.com/developer_info.php) and [PhishStorm dataset](https://research.aalto.fi/en/datasets/phishstorm--phishing--legitimate-url-dataset(f49465b2-c68a-4182-9171-075f0ed797d5).html) As we have told you that dataset is collected from different sources. So firstly, we have collected the URLs from different sources into a separate data frame and finally merge them to retain only URLs and their class type.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1486586, "CreatorUserId": 2048048, "OwnerUserId": 2048048.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2456026.0, "CurrentDatasourceVersionId": 2498424.0, "ForumId": 1506277, "Type": 2, "CreationDate": "07/23/2021 18:03:30", "LastActivityDate": "07/23/2021", "TotalViews": 79891, "TotalDownloads": 8584, "TotalVotes": 100, "TotalKernels": 19}]
[{"Id": 2048048, "UserName": "sid321axn", "DisplayName": "Manu Siddhartha", "RegisterDate": "07/06/2018", "PerformanceTier": 2}]
false
1
2,178
0
2,764
2,178
129220215
# ### Plant Classification using KNN with GridSearchCV
# In this project, we employed GridSearchCV to identify an optimal combination of parameters to improve the effectiveness of the KNN model. GridSearchCV is a technique that enables an exhaustive search across a pre-defined space of hyperparameters to find the best combination for optimizing the model's performance. This helps us determine the best values for KNN parameters, such as the number of neighbors (K), the distance metric, and others, thereby maximizing the accuracy and performance of the plant prediction model.
# KNN (K-Nearest Neighbors) is a supervised machine learning algorithm used for classification and regression tasks. In the case of classification, as in our project, it aims to identify the class of a given object based on the classes of its nearest neighbors. KNN considers the distance between data points to determine the class of a new point, assuming that similar objects are close to each other in the feature space.
# Provided data:
# The following plant measurements have been provided:
# Sepal length (cm)
# Sepal width (cm)
# Petal length (cm)
# Petal width (cm)
# These data will be used to train the KNN model and predict the class of an unknown plant based on its characteristics.

# Importing the libraries and the iris dataset
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV

# Model import
from sklearn.neighbors import KNeighborsClassifier

# Dataset loading (feature_names is already a list, so pass it directly as columns)
iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)
y = pd.Series(iris.target)
print(X.shape)
display(X.head(3))
print(y.value_counts())

# Setting the labels with their indexes and names
iris_classes = {
    "setosa": 0,
    "versicolor": 1,
    "virginica": 2,
}

# Normalizing feature values
normalizer = MinMaxScaler(feature_range=(0, 3))
X_normalized = normalizer.fit_transform(X)

# Implementing GridSearchCV
k = np.array([3, 5, 6, 7, 8, 9, 11])
distance = ["minkowski", "chebyshev"]
p = np.array([1, 2, 3, 4, 5])
values_grid = {"n_neighbors": k, "metric": distance, "p": p}
model = KNeighborsClassifier()
gridKNN = GridSearchCV(estimator=model, param_grid=values_grid, cv=5)
gridKNN.fit(X_normalized, y)

# Visualizing the best combination of hyperparameters found by GridSearchCV
print(">> best accuracy: {:.2%}".format(gridKNN.best_score_))
print(">> best K: {}".format(gridKNN.best_estimator_.n_neighbors))
print(">> best distance: {}".format(gridKNN.best_estimator_.metric))
print(">> best p: {}".format(gridKNN.best_estimator_.p))

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
    X_normalized, y, test_size=0.25, random_state=16
)

# Creating a KNN model configured with the hyperparameters found by GridSearchCV
knn = KNeighborsClassifier(n_neighbors=7, metric="chebyshev", p=1)
knn.fit(X_train, y_train)

# Evaluating the accuracy of the created model
accuracy = knn.score(X_test, y_test)
print(f">> accuracy: {accuracy:.2%}")
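
# A short hedged extension: accuracy alone hides per-class behaviour, so a
# classification report and confusion matrix give a fuller picture of the fitted
# model (assumes `knn`, `X_test`, `y_test` and `iris_classes` from above).
from sklearn.metrics import classification_report, confusion_matrix

y_pred = knn.predict(X_test)
print(classification_report(y_test, y_pred, target_names=list(iris_classes.keys())))
print(confusion_matrix(y_test, y_pred))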
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/220/129220215.ipynb
null
null
[{"Id": 129220215, "ScriptId": 38416510, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14728510, "CreationDate": "05/12/2023 00:35:35", "VersionNumber": 1.0, "Title": "knn-iris-classification", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
903
1
903
903
129220432
# **Experimenting with Microsoft's SpeechT5 model.**
# * My end goal is to pass it a novel-sized text file for it to narrate.
# * Still need to work out how to access the other voice characteristics from the xvector dataset

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# The following pip packages need to be installed: transformers, datasets, torch, soundfile
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from datasets import load_dataset
import torch
import soundfile as sf

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

mytext = "No one would have believed in the last years of the nineteenth century that this world was being watched keenly and closely by intelligences greater than man’s and yet as mortal as his own; that as men busied themselves about their various concerns they were scrutinised and studied, perhaps almost as narrowly as a man with a microscope might scrutinise the transient creatures that swarm and multiply in a drop of water. With infinite complacency men went to and fro over this globe about their little affairs, serene in their assurance of their empire over matter. "
len(mytext)  # max string length 600 chars

inputs = processor(text=mytext, return_tensors="pt")

# load the xvector containing the speaker's voice characteristics from a dataset
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("hgWells.wav", speech.numpy(), samplerate=16000)

import IPython

out_audio = "hgWells.wav"
IPython.display.Audio(out_audio)
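
# A hedged sketch of the novel-narration goal mentioned above: split long text into
# pieces under the ~600-character limit at sentence boundaries, synthesise each piece,
# and concatenate the waveforms (reuses processor, model, vocoder and
# speaker_embeddings from above; splitting on ". " is a simplifying assumption, and a
# single sentence longer than the limit would still overflow).
def narrate(long_text, max_chars=550):
    chunks, current = [], ""
    for sentence in long_text.split(". "):
        if current and len(current) + len(sentence) + 2 > max_chars:
            chunks.append(current)
            current = sentence
        else:
            current = (current + ". " + sentence) if current else sentence
    if current:
        chunks.append(current)
    pieces = []
    for chunk in chunks:
        chunk_inputs = processor(text=chunk, return_tensors="pt")
        audio = model.generate_speech(
            chunk_inputs["input_ids"], speaker_embeddings, vocoder=vocoder
        )
        pieces.append(audio.numpy())
    return np.concatenate(pieces)


# Example usage (commented out to keep the notebook fast):
# sf.write("long_narration.wav", narrate(mytext), samplerate=16000)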
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/220/129220432.ipynb
null
null
[{"Id": 129220432, "ScriptId": 38414604, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14816590, "CreationDate": "05/12/2023 00:39:57", "VersionNumber": 2.0, "Title": "Microsoft SpeechT5 text2speech", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 56.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
676
0
676
676
129220598
<jupyter_start><jupyter_text>NepCOV19Tweets
Abstract:
COVID-19 has claimed several human lives to this date. People are dying not only because of physical infection of the virus but also because of mental illness, which is linked to people’s sentiments and psychologies. People’s written texts/posts scattered on the web could help understand their psychology and the state they are in during this pandemic. In this paper, we analyze people’s sentiment based on the classification of tweets collected from the social media platform, Twitter, in Nepal. For this, we, first, propose to use three different feature extraction methods—fastText-based (ft), domain-specific (ds), and domain-agnostic (da)—for the representation of tweets. Among these three methods, two methods (“ds” and “da”) are the novel methods used in this study. Second, we propose three different convolution neural networks (CNNs) to implement the proposed features. Last, we ensemble such three CNNs models using ensemble CNN, which works in an end-to-end manner, to achieve the end results. For the evaluation of the proposed feature extraction methods and CNN models, we prepare a Nepali Twitter sentiment dataset, called NepCOV19Tweets, with 3 classes (positive, neutral, and negative). The experimental results on such dataset show that our proposed feature extraction methods possess the discriminating characteristics for the sentiment classification. Moreover, the proposed CNN models impart robust and stable performance on the proposed features. Also, our dataset can be used as a benchmark to study the COVID-19-related sentiment analysis in the Nepali language.

If you want to use the NepCOV19Tweets dataset, please cite the paper as,
C Sitaula, A Basnet, A Mainali and TB Shahi, "Deep Learning-based Methods for Sentiment Analysis on Nepali COVID-19-related Tweets", Computational Intelligence and Neuroscience, 2021.
Kaggle dataset identifier: nepcov19tweets
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        pass
        # print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import datasets

[d for d in datasets.list_datasets() if "nepali" in d]

d = pd.read_csv(
    "/kaggle/input/oscar-corpus-nepali/ne_dedup.txt",
    header=None,
    delimiter="\r\n",
    engine="python",
    names=["text"],
)
d.shape

import re


def removeCharacters(s):
    s["text"] = re.sub(
        "[a-zA-Z\u00C0-\u024F\u1E00-\u1EFF]+",  # Latin and accented alphabets
        "",
        re.sub(
            "[一-龠]+|[ぁ-ゔ]+|[ァ-ヴー]+|[々〆〤ヶ]+",  # Japanese
            "",
            re.sub(
                "[\u0621-\u0669]+",  # Arabic
                "",
                re.sub(
                    r"[\u4e00-\u9fff]+",  # Supposed to be Chinese, Japanese and Korean
                    "",
                    re.sub(
                        r"[\u3131-\u314e|\u314f-\u3163|\uac00-\ud7a3]+",  # Korean
                        "",
                        re.sub(r"[\u31c0-\u9fff]+", "", s["text"]),  # Chinese
                    ),
                ),
            ),
        ),
    ).strip()
    ## sub didn't remove those characters properly, so ditching them altogether
    s["text"] = (
        ""
        if re.search("[ศག༻ழதโ재ខ년โខማঙщཔუ의ማঙಬઅཁਰהᄃΦชᤖসફ§ャΐൻបะരыپホΨজ]+", s["text"])
        else s["text"]
    )
    return s


d = d.apply(lambda x: removeCharacters(x), axis=1)
d.dropna(inplace=True)
print(d.shape)


def setOfCharacters(s):
    s["text"] = "".join(set(s["text"]))
    return s


boc = d.apply(lambda x: setOfCharacters(x), axis=1)
boc = set("".join(boc["text"]))
print(len(boc))
boc  # len: 1099
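
# A hedged alternative to the blacklist regexes above: instead of enumerating each
# foreign script, keep only the Devanagari block (U+0900-U+097F) plus digits,
# whitespace and basic punctuation. This changes what survives, so it is a
# design-choice sketch rather than a drop-in replacement.
keep_devanagari = re.compile(r"[^\u0900-\u097F0-9\s.,!?]+")


def keep_only_nepali(text):
    # Drop every character outside the allowed set, then trim the edges
    return keep_devanagari.sub("", text).strip()


# e.g. keep_only_nepali("नमस्ते hello 你好") keeps only "नमस्ते"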
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/220/129220598.ipynb
nepcov19tweets
mathew11111
[{"Id": 129220598, "ScriptId": 38381400, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4174671, "CreationDate": "05/12/2023 00:42:56", "VersionNumber": 2.0, "Title": "Cleaning Oscar Nepali Dataset", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 27.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 31.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185073370, "KernelVersionId": 129220598, "SourceDatasetVersionId": 2724145}, {"Id": 185073369, "KernelVersionId": 129220598, "SourceDatasetVersionId": 1500084}, {"Id": 185073368, "KernelVersionId": 129220598, "SourceDatasetVersionId": 1402732}, {"Id": 185073367, "KernelVersionId": 129220598, "SourceDatasetVersionId": 870319}]
[{"Id": 2724145, "DatasetId": 1660323, "DatasourceVersionId": 2769111, "CreatorUserId": 824195, "LicenseName": "Other (specified in description)", "CreationDate": "10/20/2021 12:01:28", "VersionNumber": 1.0, "Title": "NepCOV19Tweets", "Slug": "nepcov19tweets", "Subtitle": "Nepali COVID related tweets dataset", "Description": "Abstract:\n\nCOVID-19 has claimed several human lives to this date. People are dying not only because of physical infection of the virus but also because of mental illness, which is linked to people\u2019s sentiments and psychologies. People\u2019s written texts/posts scattered on the web could help understand their psychology and the state they are in during this pandemic. In this paper, we analyze people\u2019s sentiment based on the classification of tweets collected from the social media platform, Twitter, in Nepal. For this, we, first, propose to use three different feature extraction methods\u2014fastText-based (ft), domain-specific (ds), and domain-agnostic (da)\u2014for the representation of tweets. Among these three methods, two methods (\u201cds\u201d and \u201cda\u201d) are the novel methods used in this study. Second, we propose three different convolution neural networks (CNNs) to implement the proposed features. Last, we ensemble such three CNNs models using ensemble CNN, which works in an end-to-end manner, to achieve the end results. For the evaluation of the proposed feature extraction methods and CNN models, we prepare a Nepali Twitter sentiment dataset, called NepCOV19Tweets, with 3 classes (positive, neutral, and negative). The experimental results on such dataset show that our proposed feature extraction methods possess the discriminating characteristics for the sentiment classification. Moreover, the proposed CNN models impart robust and stable performance on the proposed features. Also, our dataset can be used as a benchmark to study the COVID-19-related sentiment analysis in the Nepali language.\n\nIf you want to use the NepCOV19Tweets dataset, please cite the paper as,\n\nC Sitaula, A Basnet, A Mainali and TB Shahi, \"Deep Learning-based Methods for Sentiment Analysis on Nepali COVID-19-related Tweets\", Computational Intelligence and Neuroscience, 2021.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1660323, "CreatorUserId": 824195, "OwnerUserId": 824195.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2724145.0, "CurrentDatasourceVersionId": 2769111.0, "ForumId": 1681378, "Type": 2, "CreationDate": "10/20/2021 12:01:28", "LastActivityDate": "10/20/2021", "TotalViews": 2770, "TotalDownloads": 73, "TotalVotes": 5, "TotalKernels": 2}]
[{"Id": 824195, "UserName": "mathew11111", "DisplayName": "Anish Basnet", "RegisterDate": "12/05/2016", "PerformanceTier": 0}]
false
0
788
0
1,248
788
129327543
# for importing necessary packages and libraries
import pandas as pd
import numpy as np
from scipy import stats

# for reading the datafile
aqi = pd.read_csv("/kaggle/input/epa-air-quality/epa_air_quality.csv")

# for exploring the dataframe here:
print("Use head() to show a sample of data")
print(aqi.head())
print("Use describe() to summarize AQI")
print(aqi.describe(include="all"))
print("For a more thorough examination of observations by state use value_counts()")
print(aqi["state_name"].value_counts())

# for creating dataframes to compare Los Angeles and the rest of California
ca_la = aqi[aqi["county_name"] == "Los Angeles"]
ca_other = aqi[
    (aqi["state_name"] == "California") & (aqi["county_name"] != "Los Angeles")
]

# for setting the significance level to 5%
significance_level = 0.05
significance_level

# for computing the p-value
stats.ttest_ind(a=ca_la["aqi"], b=ca_other["aqi"], equal_var=False)

# for creating dataframes for New York & Ohio to be compared in your test
ny = aqi[aqi["state_name"] == "New York"]
ohio = aqi[aqi["state_name"] == "Ohio"]

# for computing the p-value here
tstat, pvalue = stats.ttest_ind(a=ny["aqi"], b=ohio["aqi"], alternative="less")
print(tstat)
print(pvalue)

# for creating a dataframe for Michigan
michigan = aqi[aqi["state_name"] == "Michigan"]

# for computing the p-value here
tstat, pvalue = stats.ttest_1samp(michigan["aqi"], 10, alternative="greater")
print(tstat)
print(pvalue)
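# Not part of the original notebook: the significance level defined above is
# never actually compared with the computed p-values. A minimal sketch of that
# decision rule, reusing the variables from the cells above (the printed
# conclusions are illustrative wording, not the author's).
tstat, pvalue = stats.ttest_ind(a=ca_la["aqi"], b=ca_other["aqi"], equal_var=False)
if pvalue < significance_level:
    print("Reject H0: mean AQI in Los Angeles differs from the rest of California.")
else:
    print("Fail to reject H0: no evidence of a difference in mean AQI.")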
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/327/129327543.ipynb
null
null
[{"Id": 129327543, "ScriptId": 38451743, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/12/2023 20:15:53", "VersionNumber": 1.0, "Title": "Exploring Hypothesis Testing in Python", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 49.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
495
0
495
495
129327629
# # **Diamonds Price Prediction - Group-based Project**
# ***Team Members (Collaborators): -***
# _1- Hekar A. Mohammed._
# _2- Musaad Alhamami._
# _3- Omar Shaqfeh._
# ## **Overview**
# In this project, we aim to build a regression model that predicts diamond prices based on our existing data.
# ## **Data Explanation**
# **This venerable collection contains information about around 54,000 diamonds, including their prices and other characteristics. This dataset is great for people who are just getting started with data analysis and visualization.**
# Now, we'll explore each column and describe it to get the full meaning based on the original source of the data:
# | Column      | Description |
# |-------------|-------------|
# | **price**   | Price in US dollars (\\$326 -- $18,823). |
# | **carat**   | Weight of the diamond (0.2--5.01). |
# | **cut**     | Quality of the cut (Fair, Good, Very Good, Premium, Ideal). |
# | **color**   | Diamond color, from J (worst) to D (best). |
# | **clarity** | A measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best)). |
# | **x**       | Length in mm (0--10.74). |
# | **y**       | Width in mm (0--58.9). |
# | **z**       | Depth in mm (0--31.8). |
# | **depth**   | Total depth percentage = z / mean(x, y) = 2 * z / (x + y) (43--79). |
# | **table**   | Width of top of diamond relative to widest point (43--95). |
# **Additionally, our dataset has been divided into two files as follows: -**
# - The training set is in ***train.csv***.
# - The testing set is in ***test.csv***.
# ## **Setup Kaggle environment necessities**
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# ## **Import the needed libraries**
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
from sklearn import metrics
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import LabelEncoder, StandardScaler, RobustScaler

# ## **Loading our dataset (Training & Testing)**
train_dataset = pd.read_csv("/kaggle/input/diamond-price-prediction/train.csv")
test_dataset = pd.read_csv("/kaggle/input/diamond-price-prediction/test.csv")

# ## **Examining our dataset**
# Display the first few rows
train_dataset.head()

# Display the last few rows
train_dataset.tail()

# Display the dataset's shape (rows, columns)
train_dataset.shape

# Display dataset's info (column data types, non-null values, etc.)
train_dataset.info()

# **Great!**
# **Now, we can infer that we have 3 categorical and 7 numerical columns as follows (a quick programmatic check is sketched below): -**
# * **Categorical**:
#   - cut.
#   - color.
#   - clarity.
# * **Numerical**:
#   - carat.
#   - depth.
#   - table.
#   - price.
#   - x.
#   - y.
#   - z.
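# Not part of the original notebook: a quick programmatic check of the
# categorical/numerical split inferred above (assumes only the
# `train_dataset` loaded in the previous cells).
categorical_cols = train_dataset.select_dtypes(include="object").columns.tolist()
numerical_cols = train_dataset.select_dtypes(exclude="object").columns.tolist()
print("Categorical:", categorical_cols)
print("Numerical:", numerical_cols)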
# Display basic statistics for numeric columns
train_dataset.describe()

# ## **Check and Handle Missing Values and Data Anomalies**
train_dataset.isnull().sum()
train_dataset.isna().sum()
train_dataset.nunique()
train_dataset.duplicated().sum()

# Dropping rows where a dimension is zero; a real diamond cannot have zero
# length, width, or depth.
train_dataset = train_dataset.drop(train_dataset[train_dataset["x"] == 0].index)
train_dataset = train_dataset.drop(train_dataset[train_dataset["y"] == 0].index)
train_dataset = train_dataset.drop(train_dataset[train_dataset["z"] == 0].index)
train_dataset.shape

# Dropping the outliers.
train_dataset = train_dataset[
    (train_dataset["depth"] < 75) & (train_dataset["depth"] > 45)
]
train_dataset = train_dataset[
    (train_dataset["table"] < 80) & (train_dataset["table"] > 40)
]
train_dataset = train_dataset[(train_dataset["x"] < 30)]
train_dataset = train_dataset[(train_dataset["y"] < 30)]
train_dataset = train_dataset[(train_dataset["z"] < 30) & (train_dataset["z"] > 2)]
train_dataset.shape

# ## **Analyze the Correlation between our data features**
correlation_matrix = train_dataset.corr()
correlation_matrix

# Correlation heatmap
plt.figure(figsize=(15, 7))
sns.heatmap(correlation_matrix, annot=True)

# **Based on the two previous outputs, we can infer that 'price' and 'carat' are strongly correlated with each other, while the remaining numerical attributes show only weak or negligible correlations with the price.**

# ## **Visualize the Data**
train_dataset.hist(bins=50, figsize=(20, 15))
plt.show()

sns.pairplot(train_dataset, diag_kind="kde")

plt.figure(figsize=(15, 7))
correlation_matrix["price"].sort_values(ascending=False).plot(kind="bar")

plt.scatter(train_dataset["carat"], train_dataset["price"])

input_cat_columns = train_dataset.select_dtypes(include=["object"]).columns.tolist()
for col in input_cat_columns:
    sns.catplot(
        x=col,
        y="price",
        kind="box",
        dodge=False,
        height=5,
        aspect=3,
        data=train_dataset,
    )

train_dataset["color"].hist()

ax = sns.pairplot(train_dataset, hue="cut")

# ## **Preprocess and Prepare our Data for the training Phase**
# Get the list of categorical variables
s = train_dataset.dtypes == "object"
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols)


# Encoding the categorical data into numerical codes
def encode_data(dataset: pd.DataFrame):
    # Make a copy to avoid changing original data
    label_data = dataset.copy()

    # Apply label encoder to each column with categorical data
    label_encoder = LabelEncoder()
    for col in object_cols:
        label_data[col] = label_encoder.fit_transform(label_data[col])
    return label_data


# Encoding our training set
encoded_train_dataset = encode_data(train_dataset)

# Review our statistical info
encoded_train_dataset.describe()

# Reviewing the correlation matrix
updated_correlation_matrix = encoded_train_dataset.corr()
f, ax = plt.subplots(figsize=(15, 7))
sns.heatmap(
    updated_correlation_matrix,
    annot=True,
)

encoded_train_dataset = encoded_train_dataset.drop(["Id"], axis=1)

# Assigning the features as X and the target as y
X = encoded_train_dataset.drop(["price"], axis=1)
y = encoded_train_dataset["price"]

# Split the training set into train and test portions to validate our model
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42
)

# ## **Model Creation**
# Building pipelines of a robust scaler and a model for various regressors.
pipeline_lr = Pipeline(
    [("scaler1", RobustScaler()), ("lr_regressor", LinearRegression())]
)

pipeline_dt = Pipeline(
    [("scaler2", RobustScaler()), ("dt_regressor", DecisionTreeRegressor())]
)

pipeline_rf = Pipeline(
    [("scaler3", RobustScaler()), ("rf_regressor", RandomForestRegressor())]
)

pipeline_kn = Pipeline(
    [("scaler4", RobustScaler()), ("kn_regressor", KNeighborsRegressor())]
)

pipeline_xgb = Pipeline(
    [("scaler5", RobustScaler()), ("xgb_regressor", XGBRegressor())]
)

# List of all the pipelines
pipelines = [pipeline_lr, pipeline_dt, pipeline_rf, pipeline_kn, pipeline_xgb]

# Dictionary of pipelines and model types for ease of reference
pipe_dict = {
    0: "LinearRegression",
    1: "DecisionTreeRegressor",
    2: "RandomForestRegressor",
    3: "KNeighborsRegressor",
    4: "XGBRegressor",
}

# Fit the pipelines
for pipe in pipelines:
    pipe.fit(X_train, y_train)

# ## **Model Evaluation**
# To find the best model, we evaluate all the pipelines made above and compare their Root Mean Squared Error (RMSE); the lower, the better.
cv_results_rms = []
for i, model in enumerate(pipelines):
    cv_score = cross_val_score(
        model, X_train, y_train, scoring="neg_root_mean_squared_error", cv=10
    )
    cv_results_rms.append(cv_score)
    # the scorer returns *negative* RMSE, so negate it to report a positive RMSE
    print("%s: %f " % (pipe_dict[i], -cv_score.mean()))

# ## **Model Selection**
# Since the XGBRegressor achieves the lowest RMSE, we select it as the winning model.
# (Tree-based models are insensitive to feature scaling, so it is refit here on the raw features.)
xgb_model = XGBRegressor()
xgb_model.fit(X_train, y_train)
xgb_pred = xgb_model.predict(X_test)
np.sqrt(mean_squared_error(y_test, xgb_pred))

# Model Evaluation in varied metrics
print("R^2:", metrics.r2_score(y_test, xgb_pred))
print(
    "Adjusted R^2:",
    1
    - (1 - metrics.r2_score(y_test, xgb_pred))
    * (len(y_test) - 1)
    / (len(y_test) - X_test.shape[1] - 1),
)
print("MAE:", metrics.mean_absolute_error(y_test, xgb_pred))
print("MSE:", metrics.mean_squared_error(y_test, xgb_pred))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, xgb_pred)))

# ## **Predict the diamond prices for the testing set of our data**
# Encoding our testing set
encoded_test_dataset = encode_data(dataset=test_dataset)

# Saving the testing set IDs
encoded_test_dataset_ids = encoded_test_dataset["Id"]

# Dropping them
encoded_test_dataset = encoded_test_dataset.drop(["Id"], axis=1)

xgb_test_pred = xgb_model.predict(encoded_test_dataset)

# ## **Prepare our Submission File (submission.csv)**
df = pd.DataFrame({"Id": encoded_test_dataset_ids, "price": xgb_test_pred})
df.to_csv("submission.csv", index=False)
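# Not part of the original notebook: LabelEncoder assigns integer codes in
# alphabetical order, which does not match the quality orderings given in the
# data description above (e.g., Fair < Good < Very Good < Premium < Ideal). A
# minimal sketch of an explicit ordinal mapping under that assumption; the
# `ordinal_maps` dict is built from the description table, not from the
# original code.
ordinal_maps = {
    "cut": {"Fair": 0, "Good": 1, "Very Good": 2, "Premium": 3, "Ideal": 4},
    "color": {"J": 0, "I": 1, "H": 2, "G": 3, "F": 4, "E": 5, "D": 6},
    "clarity": {
        "I1": 0, "SI2": 1, "SI1": 2, "VS2": 3,
        "VS1": 4, "VVS2": 5, "VVS1": 6, "IF": 7,
    },
}


def encode_ordinal(dataset):
    # Map each quality column onto its domain-meaningful integer scale.
    encoded = dataset.copy()
    for col, mapping in ordinal_maps.items():
        encoded[col] = encoded[col].map(mapping)
    return encoded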
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/327/129327629.ipynb
null
null
[{"Id": 129327629, "ScriptId": 38440207, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11237613, "CreationDate": "05/12/2023 20:17:09", "VersionNumber": 2.0, "Title": "SHAI G15 | Group#3 team", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 271.0, "LinesInsertedFromPrevious": 218.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": 219.0, "LinesDeletedFromFork": 150.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 52.0, "TotalVotes": 1}]
null
null
null
null
false
0
2,886
1
2,886
2,886
129327539
<jupyter_start><jupyter_text>data.csv Kaggle dataset identifier: datacsv <jupyter_script>"/kaggle/input/car-evaluation-data-set/car_evaluation.csv" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") data = "/kaggle/input/car-evaluation-data-set/car_evaluation.csv" df = pd.read_csv(data, header=None) df.shape df.head() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] df.columns = col_names col_names df.head() df.info() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] for col in col_names: print(df[col].value_counts()) df["class"].value_counts() df.isnull().sum() x = df.drop(["class"], axis=1) y = df["class"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42 ) x_train.shape, x_test.shape x_train.dtypes x_train.head() import category_encoders as ce encoder = ce.OrdinalEncoder( cols=["buying", "maint", "doors", "persons", "lug_boot", "safety"] ) x_train = encoder.fit_transform(x_train) x_test = encoder.transform(x_test) x_train.head() x_test.head() from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=10, random_state=0) rfc.fit(x_train, y_train) y_pred = rfc.predict(x_test) from sklearn.metrics import accuracy_score print( "model accuracy score with 10 decision-trees : {0:0.4f}".format( accuracy_score(y_test, y_pred) ) ) rfc_100 = RandomForestClassifier(n_estimators=100, random_state=0) rfc_100.fit(x_train, y_train) y_pred_100 = rfc_100.predict(x_test) print( "model accuracy score with 100 decision-trees : {0:0.4f}".format( accuracy_score(y_test, y_pred_100) ) ) # # random forest classification import numpy as np import pandas as pd # %% data = pd.read_csv("/kaggle/input/datacsv/data.csv") data.drop(["id", "Unnamed: 32"], axis=1, inplace=True) # %% data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis] y = data.diagnosis.values x_data = data.drop(["diagnosis"], axis=1) # %% # normalization x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)) # %% from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.15, random_state=42 ) # %% from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() dt.fit(x_train, y_train) print("decision-tree score: ", dt.score(x_test, y_test)) # %% from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=1) rf.fit(x_train, y_train) print("random forest algo result", rf.score(x_test, y_test))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/327/129327539.ipynb
datacsv
irmerdgn
[{"Id": 129327539, "ScriptId": 38451926, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13929564, "CreationDate": "05/12/2023 20:15:48", "VersionNumber": 1.0, "Title": "Week-10", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 114.0, "LinesInsertedFromPrevious": 114.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185271453, "KernelVersionId": 129327539, "SourceDatasetVersionId": 5672351}, {"Id": 185271452, "KernelVersionId": 129327539, "SourceDatasetVersionId": 3884}]
[{"Id": 5672351, "DatasetId": 3260913, "DatasourceVersionId": 5747885, "CreatorUserId": 13929564, "LicenseName": "Unknown", "CreationDate": "05/12/2023 20:14:00", "VersionNumber": 1.0, "Title": "data.csv", "Slug": "datacsv", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3260913, "CreatorUserId": 13929564, "OwnerUserId": 13929564.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5672351.0, "CurrentDatasourceVersionId": 5747885.0, "ForumId": 3326494, "Type": 2, "CreationDate": "05/12/2023 20:14:00", "LastActivityDate": "05/12/2023", "TotalViews": 53, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 13929564, "UserName": "irmerdgn", "DisplayName": "\u0130rem Erdo\u011fan", "RegisterDate": "03/01/2023", "PerformanceTier": 0}]
"/kaggle/input/car-evaluation-data-set/car_evaluation.csv" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") data = "/kaggle/input/car-evaluation-data-set/car_evaluation.csv" df = pd.read_csv(data, header=None) df.shape df.head() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] df.columns = col_names col_names df.head() df.info() col_names = ["buying", "maint", "doors", "persons", "lug_boot", "safety", "class"] for col in col_names: print(df[col].value_counts()) df["class"].value_counts() df.isnull().sum() x = df.drop(["class"], axis=1) y = df["class"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42 ) x_train.shape, x_test.shape x_train.dtypes x_train.head() import category_encoders as ce encoder = ce.OrdinalEncoder( cols=["buying", "maint", "doors", "persons", "lug_boot", "safety"] ) x_train = encoder.fit_transform(x_train) x_test = encoder.transform(x_test) x_train.head() x_test.head() from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=10, random_state=0) rfc.fit(x_train, y_train) y_pred = rfc.predict(x_test) from sklearn.metrics import accuracy_score print( "model accuracy score with 10 decision-trees : {0:0.4f}".format( accuracy_score(y_test, y_pred) ) ) rfc_100 = RandomForestClassifier(n_estimators=100, random_state=0) rfc_100.fit(x_train, y_train) y_pred_100 = rfc_100.predict(x_test) print( "model accuracy score with 100 decision-trees : {0:0.4f}".format( accuracy_score(y_test, y_pred_100) ) ) # # random forest classification import numpy as np import pandas as pd # %% data = pd.read_csv("/kaggle/input/datacsv/data.csv") data.drop(["id", "Unnamed: 32"], axis=1, inplace=True) # %% data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis] y = data.diagnosis.values x_data = data.drop(["diagnosis"], axis=1) # %% # normalization x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)) # %% from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.15, random_state=42 ) # %% from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() dt.fit(x_train, y_train) print("decision-tree score: ", dt.score(x_test, y_test)) # %% from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100, random_state=1) rf.fit(x_train, y_train) print("random forest algo result", rf.score(x_test, y_test))
false
1
974
0
993
974
129327206
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.linear_model import LogisticRegression
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

warnings.filterwarnings("ignore")
input_path = "/kaggle/input/titanic/"
train_df = pd.read_csv(input_path + "train.csv", index_col="PassengerId")
test_df = pd.read_csv(input_path + "test.csv", index_col="PassengerId")
print(train_df.shape, test_df.shape)
train_df.head()
train_df.info()
test_df.isnull().sum()
sns.countplot(train_df, x="Survived", hue="Sex")
sns.countplot(train_df, x="Pclass", hue="Sex")
sns.histplot(train_df, x="Age", hue="Survived")


# Note: these transformers modify the passed dataframe in place, so after the
# pipeline runs, train_df/test_df themselves hold the prepared data.
class Alone(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        X["Alone"] = X["Parch"] + X["SibSp"]
        X["Alone"] = X["Alone"].apply(lambda x: 1 if x > 0 else 0)
        X.drop(["Parch", "SibSp"], axis=1, inplace=True)
        return X


class DropUselessFeatures(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        columns = ["Name", "Cabin", "Embarked", "Ticket", "Fare"]
        X.drop(columns, axis=1, inplace=True)
        return X


class MapSex(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        sex_dict = {"male": 1, "female": 2}
        X["Sex"] = X["Sex"].map(sex_dict)
        return X


class FillNans(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        X.fillna(0, inplace=True)
        return X


prepped_data_pipeline = Pipeline(
    [
        ("is_passenger_alone", Alone()),
        ("drop_useless_features", DropUselessFeatures()),
        ("map_sex", MapSex()),
        ("fill_nans", FillNans()),
    ]
)

prepped_train_data = prepped_data_pipeline.fit_transform(train_df)
prepped_test_data = prepped_data_pipeline.transform(test_df)
prepped_train_data.sample(5)
sns.countplot(prepped_train_data, x="Alone", hue="Survived")


def write_predictions(model, indexes, test_df, filename):
    predict = model.predict(test_df)
    output_df = pd.DataFrame({"PassengerId": indexes, "Survived": predict})
    output_df.to_csv(filename, index=False)


train_y = prepped_train_data["Survived"].copy()
prepped_train_data.drop(["Survived"], axis=1, inplace=True)
X_train, X_split, y_train, y_split = train_test_split(
    prepped_train_data, train_y, test_size=0.3, random_state=42
)
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_reg_preds = log_reg.predict(X_split)
accuracy = accuracy_score(y_split, log_reg_preds)
print(accuracy)
# prepped_test_data is the transformed test_df (the same object, mutated in place)
write_predictions(log_reg, test_df.index, prepped_test_data, "prediction_log_reg.csv")
# 0.75837
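# Not part of the original notebook: a single 70/30 split gives one, somewhat
# noisy accuracy estimate; k-fold cross-validation averages several splits. A
# minimal sketch reusing the prepared features from above (cv=5 and max_iter
# are illustrative choices).
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(
    LogisticRegression(max_iter=1000), prepped_train_data, train_y, cv=5
)
print(f"CV accuracy: {cv_scores.mean():.4f} +/- {cv_scores.std():.4f}")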
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/327/129327206.ipynb
null
null
[{"Id": 129327206, "ScriptId": 38326230, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10677547, "CreationDate": "05/12/2023 20:10:27", "VersionNumber": 2.0, "Title": "Using pipelines", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 106.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
998
0
998
998
129327934
<jupyter_start><jupyter_text>CIFAKE: Real and AI-Generated Synthetic Images
# CIFAKE: Real and AI-Generated Synthetic Images
The quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness.

CIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI?

Further information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)

![Images from the CIFAKE dataset](https://i.imgur.com/RiOwf8i.png)

## Dataset details
The dataset contains two classes - REAL and FAKE.

For REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)

For the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4

There are 100,000 images for training (50k per class) and 20,000 for testing (10k per class)

## Papers with Code
The dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)
[https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)

## References
If you use this dataset, you **must** cite the following sources

[Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf)

[Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)

Real images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published.

## Notes

The updates to the dataset on the 28th of March 2023 did not change anything; the file formats ".jpeg" were renamed ".jpg" and the root folder was uploaded to meet Kaggle's usability requirements.

## License
This dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE):

*Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:*

*The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*

*THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*
Kaggle dataset identifier: cifake-real-and-ai-generated-synthetic-images
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

# for dirname, _, filenames in os.walk('/kaggle/input'):
#     for filename in filenames:
#         print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import random
import shutil

# Set the paths to your dataset folders
dataset_dir = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train"
real_dir = os.path.join(dataset_dir, "REAL")
fake_dir = os.path.join(dataset_dir, "FAKE")

# Set the paths to the new directories that will contain the selected images
train_dir = "/kaggle/working/train"
real_train_dir = os.path.join(train_dir, "REAL")
fake_train_dir = os.path.join(train_dir, "FAKE")

# Create the new directories if they don't exist
if not os.path.exists(real_train_dir):
    os.makedirs(real_train_dir)
if not os.path.exists(fake_train_dir):
    os.makedirs(fake_train_dir)

# Set the number of images to select from each folder
num_images = 50

# Randomly select the required number of images from the REAL folder and copy them to the new directory
real_images = os.listdir(real_dir)
selected_real_images = random.sample(real_images, num_images)
for image_name in selected_real_images:
    source_path = os.path.join(real_dir, image_name)
    dest_path = os.path.join(real_train_dir, image_name)
    shutil.copyfile(source_path, dest_path)

# Randomly select the required number of images from the FAKE folder and copy them to the new directory
fake_images = os.listdir(fake_dir)
selected_fake_images = random.sample(fake_images, num_images)
for image_name in selected_fake_images:
    source_path = os.path.join(fake_dir, image_name)
    dest_path = os.path.join(fake_train_dir, image_name)
    shutil.copyfile(source_path, dest_path)

import tensorflow as tf
import numpy as np
import os
import cv2

# Set the paths to the train images (the casing must match the "REAL"/"FAKE"
# folders created above)
train_dir = "/kaggle/working/train"
real_train_dir = os.path.join(train_dir, "REAL")
fake_train_dir = os.path.join(train_dir, "FAKE")

# Define the CNN architecture
model = tf.keras.Sequential(
    [
        tf.keras.layers.Conv2D(
            32, (3, 3), activation="relu", input_shape=(224, 224, 3)
        ),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(128, (3, 3), activation="relu"),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ]
)

# Compile the model ("lr" is deprecated in favor of "learning_rate")
model.compile(
    loss="binary_crossentropy",
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
    metrics=["accuracy"],
)

# Define the training data generator
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)

# Define the validation data generator
valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

# Set the batch size
batch_size = 16

# Define the training and validation data generators
# (note: both flow from train_dir, so the "validation" score is measured on
# the same 100 training images; a held-out folder would give a fairer estimate)
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(224, 224), batch_size=batch_size, class_mode="binary"
)
valid_generator = valid_datagen.flow_from_directory(
    train_dir, target_size=(224, 224), batch_size=batch_size, class_mode="binary"
)

# Train the model
history = model.fit(train_generator, epochs=20, validation_data=valid_generator)

# Set the paths to your dataset folders
dataset_dir_test = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/test"
real_dir = os.path.join(dataset_dir_test, "REAL")
fake_dir = os.path.join(dataset_dir_test, "FAKE")

# Set the paths to the new directories that will contain the selected images
test_dir = "/kaggle/working/test"
real_test_dir = os.path.join(test_dir, "REAL")
fake_test_dir = os.path.join(test_dir, "FAKE")

# Create the new directories if they don't exist
if not os.path.exists(real_test_dir):
    os.makedirs(real_test_dir)
if not os.path.exists(fake_test_dir):
    os.makedirs(fake_test_dir)

# Set the number of images to select from each folder
num_images = 10

# Randomly select the required number of images from the REAL folder and copy them to the new directory
real_images = os.listdir(real_dir)
selected_real_images = random.sample(real_images, num_images)
for image_name in selected_real_images:
    source_path = os.path.join(real_dir, image_name)
    dest_path = os.path.join(real_test_dir, image_name)
    shutil.copyfile(source_path, dest_path)

# Randomly select the required number of images from the FAKE folder and copy them to the new directory
fake_images = os.listdir(fake_dir)
selected_fake_images = random.sample(fake_images, num_images)
for image_name in selected_fake_images:
    source_path = os.path.join(fake_dir, image_name)
    dest_path = os.path.join(fake_test_dir, image_name)
    shutil.copyfile(source_path, dest_path)

# Define the labels (flow_from_directory assigns class indices alphabetically,
# so FAKE -> 0 and REAL -> 1)
labels = ["FAKE", "REAL"]

# Create a list to store the predictions
predictions = []

# Loop through the real test images and predict if they are real or fake
for image_name in os.listdir(real_test_dir):
    image_path = os.path.join(real_test_dir, image_name)
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; training used RGB
    image = cv2.resize(image, (224, 224))
    image = image / 255.0  # apply the same 1/255 rescaling used during training
    image = np.expand_dims(image, axis=0)
    prediction = model.predict(image)
    # the model has a single sigmoid output, so threshold it at 0.5
    # (np.argmax on a one-element vector would always return 0)
    predicted_label = labels[int(prediction[0][0] > 0.5)]
    predictions.append(predicted_label)
print(predictions)

# Loop through the fake test images and predict if they are real or fake
for image_name in os.listdir(fake_test_dir):
    image_path = os.path.join(fake_test_dir, image_name)
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    image = image / 255.0
    image = np.expand_dims(image, axis=0)
    prediction = model.predict(image)
    predicted_label = labels[int(prediction[0][0] > 0.5)]
    predictions.append(predicted_label)

# Print the predictions
print(predictions)
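# Not part of the original notebook: instead of looping image by image, Keras
# can score the whole test directory in one call. A minimal sketch assuming
# the `test_dir` layout created above; shuffle=False keeps filenames and
# predictions aligned.
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode="binary",
    shuffle=False,
)
loss, accuracy = model.evaluate(test_generator)
print(f"test accuracy: {accuracy:.3f}")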
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/327/129327934.ipynb
cifake-real-and-ai-generated-synthetic-images
birdy654
[{"Id": 129327934, "ScriptId": 38450781, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11988597, "CreationDate": "05/12/2023 20:21:53", "VersionNumber": 1.0, "Title": "notebook5624991017", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 173.0, "LinesInsertedFromPrevious": 173.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185272261, "KernelVersionId": 129327934, "SourceDatasetVersionId": 5256696}]
[{"Id": 5256696, "DatasetId": 3041726, "DatasourceVersionId": 5329502, "CreatorUserId": 2039603, "LicenseName": "Other (specified in description)", "CreationDate": "03/28/2023 16:00:29", "VersionNumber": 3.0, "Title": "CIFAKE: Real and AI-Generated Synthetic Images", "Slug": "cifake-real-and-ai-generated-synthetic-images", "Subtitle": "Can Computer Vision detect when images have been generated by AI?", "Description": "# CIFAKE: Real and AI-Generated Synthetic Images\nThe quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness.\n\nCIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI?\n\nFurther information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\n![Images from the CIFAKE dataset](https://i.imgur.com/RiOwf8i.png)\n\n## Dataset details\nThe dataset contains two classes - REAL and FAKE. \n\nFor REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)\n\nFor the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4\n\nThere are 100,000 images for training (50k per class) and 20,000 for testing (10k per class)\n\n## Papers with Code\nThe dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n[https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n\n\n## References\nIf you use this dataset, you **must** cite the following sources\n\n[Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdfl)\n\n[Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\nReal images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). 
The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published.\n\n## Notes\n\nThe updates to the dataset on the 28th of March 2023 did not change anything; the file formats \".jpeg\" were renamed \".jpg\" and the root folder was uploaded to meet Kaggle's usability requirements.\n\n## License\nThis dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE):\n\n*Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:*\n\n*The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*\n\n*THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*", "VersionNotes": "Kaggle compatibility fix (no actual changes)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3041726, "CreatorUserId": 2039603, "OwnerUserId": 2039603.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5256696.0, "CurrentDatasourceVersionId": 5329502.0, "ForumId": 3081274, "Type": 2, "CreationDate": "03/24/2023 13:22:42", "LastActivityDate": "03/24/2023", "TotalViews": 13728, "TotalDownloads": 1803, "TotalVotes": 46, "TotalKernels": 15}]
[{"Id": 2039603, "UserName": "birdy654", "DisplayName": "Jordan J. Bird", "RegisterDate": "07/03/2018", "PerformanceTier": 2}]
import os
import json
import music21 as m21
import numpy as np
import tensorflow.keras as keras

KERN_DATASET_PATH = "/kaggle/input/deutsch/deutschl/deutschl/erk"
SAVE_DIR = "/kaggle/working/dataset"
SINGLE_FILE_DATASET = "/kaggle/working/file_dataset"
MAPPING_PATH = "/kaggle/working/mapping.json"
SEQUENCE_LENGTH = 64

# durations are expressed in quarter length
ACCEPTABLE_DURATIONS = [
    0.25,  # 16th note
    0.5,  # 8th note
    0.75,
    1.0,  # quarter note
    1.5,
    2,  # half note
    3,
    4,  # whole note
]


def load_songs_in_kern(dataset_path):
    """Loads all kern pieces in dataset using music21.

    :param dataset_path (str): Path to dataset
    :return songs (list of m21 streams): List containing all pieces
    """
    songs = []
    # go through all the files in dataset and load them with music21
    for path, subdirs, files in os.walk(dataset_path):
        for file in files:
            # consider only kern files
            if file[-3:] == "krn":
                song = m21.converter.parse(os.path.join(path, file))
                songs.append(song)
    return songs


def has_acceptable_durations(song, acceptable_durations):
    """Boolean routine that returns True if the piece has only acceptable
    durations, False otherwise.

    :param song (m21 stream):
    :param acceptable_durations (list): List of acceptable durations in quarter length
    :return (bool):
    """
    for note in song.flat.notesAndRests:
        if note.duration.quarterLength not in acceptable_durations:
            return False
    return True


def transpose(song):
    """Transposes song to C maj/A min.

    :param song (m21 stream): Piece to transpose
    :return transposed_song (m21 stream):
    """
    # get key from the song
    parts = song.getElementsByClass(m21.stream.Part)
    measures_part0 = parts[0].getElementsByClass(m21.stream.Measure)
    key = measures_part0[0][4]

    # estimate the key with music21 if it is not annotated in the score
    if not isinstance(key, m21.key.Key):
        key = song.analyze("key")

    # get interval for transposition. E.g., Bmaj -> Cmaj
    if key.mode == "major":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("C"))
    elif key.mode == "minor":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("A"))

    # transpose song by calculated interval
    transposed_song = song.transpose(interval)
    return transposed_song


def encode_song(song, time_step=0.25):
    """Converts a score into a time-series-like music representation. Each item
    in the encoded list represents 'min_duration' quarter lengths. The symbols
    used at each step are: integers for MIDI notes, 'r' for representing a rest,
    and '_' for representing notes/rests that are carried over into a new time
    step. Here's a sample encoding:

        ["r", "_", "60", "_", "_", "_", "72", "_"]

    :param song (m21 stream): Piece to encode
    :param time_step (float): Duration of each time step in quarter length
    :return:
    """
    encoded_song = []

    for event in song.flat.notesAndRests:
        # handle notes
        if isinstance(event, m21.note.Note):
            symbol = event.pitch.midi  # 60
        # handle rests
        elif isinstance(event, m21.note.Rest):
            symbol = "r"

        # convert the note/rest into time series notation
        steps = int(event.duration.quarterLength / time_step)
        for step in range(steps):
            # if it's the first time we see a note/rest, encode it; otherwise we
            # are carrying the same symbol over into a new time step
            if step == 0:
                encoded_song.append(symbol)
            else:
                encoded_song.append("_")

    # cast encoded song to str
    encoded_song = " ".join(map(str, encoded_song))
    return encoded_song


def preprocess(dataset_path):
    # load folk songs
    print("Loading songs...")
    songs = load_songs_in_kern(dataset_path)
    print(f"Loaded {len(songs)} songs.")

    # make sure the output directory exists before writing the encoded songs
    os.makedirs(SAVE_DIR, exist_ok=True)

    for i, song in enumerate(songs):
        # filter out songs that have non-acceptable durations
        if not has_acceptable_durations(song, ACCEPTABLE_DURATIONS):
            continue

        # transpose songs to Cmaj/Amin
        song = transpose(song)

        # encode songs with music time series representation
        encoded_song = encode_song(song)

        # save songs to text file
        save_path = os.path.join(SAVE_DIR, str(i))
        with open(save_path, "w") as fp:
            fp.write(encoded_song)

        if i % 10 == 0:
            print(f"Song {i} out of {len(songs)} processed")


def load(file_path):
    with open(file_path, "r") as fp:
        song = fp.read()
    return song


def create_single_file_dataset(dataset_path, file_dataset_path, sequence_length):
    """Generates a file collating all the encoded songs and adding new piece delimiters.

    :param dataset_path (str): Path to folder containing the encoded songs
    :param file_dataset_path (str): Path to file for saving songs in single file
    :param sequence_length (int): # of time steps to be considered for training
    :return songs (str): String containing all songs in dataset + delimiters
    """
    new_song_delimiter = "/ " * sequence_length
    songs = ""

    # load encoded songs and add delimiters
    for path, _, files in os.walk(dataset_path):
        for file in files:
            file_path = os.path.join(path, file)
            song = load(file_path)
            songs = songs + song + " " + new_song_delimiter

    # remove the trailing space from the end of the string
    songs = songs[:-1]

    # save string that contains all the dataset
    with open(file_dataset_path, "w") as fp:
        fp.write(songs)

    return songs


def create_mapping(songs, mapping_path):
    """Creates a json file that maps the symbols in the song dataset onto integers.

    :param songs (str): String with all songs
    :param mapping_path (str): Path where to save mapping
    :return:
    """
    mappings = {}

    # identify the vocabulary
    songs = songs.split()
    vocabulary = list(set(songs))

    # create mappings
    for i, symbol in enumerate(vocabulary):
        mappings[symbol] = i

    # save vocabulary to a json file
    with open(mapping_path, "w") as fp:
        json.dump(mappings, fp, indent=4)


def convert_songs_to_int(songs):
    int_songs = []

    # load mappings
    with open(MAPPING_PATH, "r") as fp:
        mappings = json.load(fp)

    # transform songs string to list
    songs = songs.split()

    # map songs to int
    for symbol in songs:
        int_songs.append(mappings[symbol])

    return int_songs


def generate_training_sequences(sequence_length):
    """Create input and output data samples for training. Each sample is a sequence.

    :param sequence_length (int): Length of each sequence. With a quantisation at
        16th notes, 64 steps equate to 4 bars.
    :return inputs (ndarray): Training inputs
    :return targets (ndarray): Training targets
    """
    # load songs and map them to int
    songs = load(SINGLE_FILE_DATASET)
    int_songs = convert_songs_to_int(songs)

    inputs = []
    targets = []

    # generate the training sequences
    num_sequences = len(int_songs) - sequence_length
    for i in range(num_sequences):
        inputs.append(int_songs[i : i + sequence_length])
        targets.append(int_songs[i + sequence_length])

    # one-hot encode the sequences
    vocabulary_size = len(set(int_songs))
    # inputs size: (# of sequences, sequence length, vocabulary size)
    inputs = keras.utils.to_categorical(inputs, num_classes=vocabulary_size)
    targets = np.array(targets)

    print(f"There are {len(inputs)} sequences.")
    return inputs, targets


def main():
    preprocess(KERN_DATASET_PATH)
    songs = create_single_file_dataset(SAVE_DIR, SINGLE_FILE_DATASET, SEQUENCE_LENGTH)
    create_mapping(songs, MAPPING_PATH)
    # inputs, targets = generate_training_sequences(SEQUENCE_LENGTH)


if __name__ == "__main__":
    main()

import matplotlib.pyplot as plt
import tensorflow.keras as keras

OUTPUT_UNITS = 38
NUM_UNITS = [256]
LOSS = "sparse_categorical_crossentropy"
LEARNING_RATE = 0.001
EPOCHS = 50
BATCH_SIZE = 64
SAVE_MODEL_PATH = "model_bilstm.h5"  # note: the network below uses a bidirectional GRU


def build_model(output_units, num_units, loss, learning_rate):
    # Create the model architecture
    input = keras.layers.Input(shape=(None, output_units))
    x = keras.layers.Bidirectional(keras.layers.GRU(num_units[0]))(input)
    x = keras.layers.Dropout(0.2)(x)
    output = keras.layers.Dense(output_units, activation="softmax")(x)
    model = keras.Model(input, output)

    # compile model
    model.compile(
        loss=loss,
        optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
        metrics=["accuracy"],
    )
    model.summary()
    return model


def train(
    output_units=OUTPUT_UNITS,
    num_units=NUM_UNITS,
    loss=LOSS,
    learning_rate=LEARNING_RATE,
):
    # generate the training sequences
    inputs, targets = generate_training_sequences(SEQUENCE_LENGTH)

    # build the network
    model = build_model(output_units, num_units, loss, learning_rate)

    # train the model, keeping the History object so the learning curves can be
    # plotted afterwards
    history = model.fit(inputs, targets, epochs=EPOCHS, batch_size=BATCH_SIZE)
    model.save(SAVE_MODEL_PATH)

    plt.plot(history.history["loss"])
    plt.plot(history.history["accuracy"])
    plt.title("Model Performance")
    plt.ylabel("Value")
    plt.xlabel("Epoch")
    plt.legend(["loss", "accuracy"], loc="upper left")
    plt.show()


if __name__ == "__main__":
    train()
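# The notebook stops after training, but the time-series encoding described in
# `encode_song` makes generation a simple sampling loop: feed a seed, predict the
# next symbol, append it, repeat. The sketch below is an assumption-laden outline
# rather than part of the original code: `generate_melody`, the seed string, and
# the temperature value are all illustrative, and it assumes mapping.json and the
# saved model are consistent with OUTPUT_UNITS.
import json

import numpy as np
import tensorflow.keras as keras


def generate_melody(model_path, mapping_path, seed="60 _ 62 _", num_steps=200, temperature=0.7):
    """Sample symbols one at a time from the trained next-symbol model."""
    model = keras.models.load_model(model_path)
    with open(mapping_path, "r") as fp:
        mappings = json.load(fp)
    inverse_mappings = {v: k for k, v in mappings.items()}

    symbols = seed.split()  # assumed: every seed symbol appears in the mapping
    for _ in range(num_steps):
        # keep only the last SEQUENCE_LENGTH symbols as context
        context = [mappings[s] for s in symbols[-SEQUENCE_LENGTH:]]
        onehot = keras.utils.to_categorical(context, num_classes=len(mappings))
        probs = model.predict(onehot[np.newaxis, ...], verbose=0)[0]
        # temperature sampling: lower temperature -> more conservative choices
        probs = np.exp(np.log(probs + 1e-9) / temperature)
        probs = probs / probs.sum()
        index = np.random.choice(len(probs), p=probs)
        symbol = inverse_mappings[index]
        if symbol == "/":  # end-of-piece delimiter from the single-file dataset
            break
        symbols.append(symbol)
    return " ".join(symbols)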
<jupyter_start><jupyter_text>Gender Classification Dataset

### Context
While I was practicing machine learning, I wanted to create a simple dataset that is closely aligned to the real world scenario and gives better results to whet my appetite on this domain. If you are a beginner who wants to try solving classification problems in machine learning and if you prefer achieving better results, try using this dataset in your projects which will be a great place to start.

### Content
This dataset contains 7 features and a label column.
**long_hair** - This column contains 0's and 1's where 1 is "long hair" and 0 is "not long hair".
**forehead_width_cm** - This column is in CM's. This is the width of the forehead.
**forehead_height_cm** - This is the height of the forehead and it's in Cm's.
**nose_wide** - This column contains 0's and 1's where 1 is "wide nose" and 0 is "not wide nose".
**nose_long** - This column contains 0's and 1's where 1 is "Long nose" and 0 is "not long nose".
**lips_thin** - This column contains 0's and 1's where 1 represents the "thin lips" while 0 is "Not thin lips".
**distance_nose_to_lip_long** - This column contains 0's and 1's where 1 represents the "long distance between nose and lips" while 0 is "short distance between nose and lips".
**gender** - This is either "Male" or "Female".
Kaggle dataset identifier: gender-classification-dataset
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
    "/kaggle/input/gender-classification-dataset/gender_classification_v7.csv"
)
df.head()
df.info()
df.nunique()

# # Applying label encoding to convert all floats and objects to int
from sklearn import preprocessing

label_encoder = preprocessing.LabelEncoder()
df["gender"] = label_encoder.fit_transform(df["gender"])
df["forehead_width_cm"] = df["forehead_width_cm"].astype(int)
df["forehead_height_cm"] = df["forehead_height_cm"].astype(int)

# # Data Preprocessing / Splitting into Train/Valid/Test Set
import tensorflow as tf

# 75% of the data is selected for training
train_df = df.sample(frac=0.75, random_state=4)
# drop the training rows from the original dataframe to get the validation set
val_df = df.drop(train_df.index)
df.info()
df.head()

# # Scaling our Data
# The simplest method is min-max scaling:
# (value - column minimum) / (column range)
# which maps every column to the (0, 1) range
max_val = train_df.max(axis=0)
min_val = train_df.min(axis=0)
value_range = max_val - min_val  # avoid shadowing the built-in `range`
train_df = (train_df - min_val) / value_range
val_df = (val_df - min_val) / value_range
train_df.shape
train_df.head()

# now let's separate the features and the label
X_train = train_df.drop("gender", axis=1)
X_val = val_df.drop("gender", axis=1)
y_train = train_df["gender"]
y_val = val_df["gender"]

# We'll need to pass the shape of the features/inputs as an argument to our
# model, so let's define a variable to save it
input_shape = [X_train.shape[1]]
input_shape

# # Create Model Neural Network
# **Create a linear Model**
model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=input_shape)])
model.summary()

# **Creating a Multilayered Neural Network**
model = tf.keras.Sequential(
    [
        tf.keras.layers.Dense(units=64, activation="relu", input_shape=input_shape),
        tf.keras.layers.Dense(units=64, activation="relu"),
        tf.keras.layers.Dense(units=1),
    ]
)
model.summary()

# the adam optimizer works well for most problems and is a good starting point
model.compile(
    optimizer="adam",
    # MAE treats the 0/1 label as a numerical target; a sigmoid output with
    # binary cross-entropy would be the more conventional classification setup,
    # but MAE keeps this example simple
    loss="mae",
)

losses = model.fit(
    X_train,
    y_train,
    validation_data=(X_val, y_val),
    # 'batch_size' examples are used per gradient update
    batch_size=256,
    epochs=15,  # total epochs
)

# # Generate Predictions and Analyze Accuracy
# pass the first 3 rows of features of our data as input to make predictions
model.predict(X_val.iloc[0:3, :])
y_val.iloc[0:3]

# # Visualize Training Vs Validation Loss
loss_df = pd.DataFrame(losses.history)
# `losses.history` stores the loss/val_loss of each epoch; loss_df is a
# dataframe of those values, so we can plot it to visualize model training
loss_df.loc[:, ["loss", "val_loss"]].plot()
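# The "Analyze Accuracy" heading above only prints raw predictions. Because the
# scaled `gender` label is still 0/1 (its minimum is 0 and its maximum is 1), the
# MAE regression output can be thresholded at 0.5 to obtain class predictions and
# an accuracy figure. A minimal sketch using the model and validation split
# defined above; the variable names here are illustrative.
val_probs = model.predict(X_val).ravel()  # raw regression outputs
val_preds = (val_probs > 0.5).astype(int)  # threshold at 0.5 -> 0/1 classes
accuracy = np.mean(val_preds == y_val.values)
print("Validation accuracy: %.2f%%" % (100 * accuracy))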
<jupyter_start><jupyter_text>Flickr 8k Dataset
Flickr8k_Dataset: Contains a total of 8092 images in JPEG format with different shapes and sizes. Of which 6000 are used for training, 1000 for test and 1000 for development.
Flickr8k_text: Contains text files describing train_set, test_set. Flickr8k. token
Kaggle dataset identifier: flickr-8k-images
<jupyter_script># tensorflow version
import tensorflow

print("tensorflow: %s" % tensorflow.__version__)
# keras version
import keras

print("keras: %s" % keras.__version__)

import string
from os import listdir
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.utils import load_img
from tensorflow.keras.utils import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from numpy import argmax
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from nltk.translate.bleu_score import corpus_bleu
from numpy import array
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout

# from keras.layers import Add
from keras.callbacks import ModelCheckpoint

# prep photo data (one-time VGG16 feature extraction, kept commented out because
# the resulting features.pkl is loaded from /kaggle/input/vrprojdata below)
# # extract features from each photo in the directory
# def extract_features(directory):
#     # load the model
#     model = VGG16()
#     # re-structure the model so the 4096-d fc2 layer is the output
#     model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
#     # summarize
#     print(model.summary())
#     # extract features from each photo
#     features = dict()
#     for name in listdir(directory):
#         # load an image from file
#         filename = directory + "/" + name
#         image = load_img(filename, target_size=(224, 224))
#         # convert the image pixels to a numpy array
#         image = img_to_array(image)
#         # reshape data for the model
#         image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
#         # prepare the image for the VGG model
#         image = preprocess_input(image)
#         # get features
#         feature = model.predict(image, verbose=0)
#         # get image id
#         image_id = name.split(".")[0]
#         # store feature
#         features[image_id] = feature
#         print(">%s" % name)
#     return features
#
# # extract features from all images
# directory = "/kaggle/input/flickr-8k-images/Flickr8k/Flickr8k_Dataset/Flicker8k_Dataset"
# features = extract_features(directory)
# print("Extracted Features: %d" % len(features))
# # save to file
# dump(features, open("/kaggle/working/features.pkl", "wb"))

# description cleaning (also one-time preprocessing, kept commented out because
# descriptions.txt is loaded from /kaggle/input/vrprojdata below)
# # load doc into memory
# def load_doc(filename):
#     # open the file as read only
#     file = open(filename, "r")
#     # read all text
#     text = file.read()
#     # close the file
#     file.close()
#     return text
#
# # extract descriptions for images
# def load_descriptions(doc):
#     mapping = dict()
#     # process lines
#     for line in doc.split("\n"):
#         # split line by white space
#         tokens = line.split()
#         if len(line) < 2:
#             continue
#         # take the first token as the image id, the rest as the description
#         image_id, image_desc = tokens[0], tokens[1:]
#         # remove filename extension from image id
#         image_id = image_id.split(".")[0]
#         # convert description tokens back to string
#         image_desc = " ".join(image_desc)
#         # create the list if needed
#         if image_id not in mapping:
#             mapping[image_id] = list()
#         # store description
#         mapping[image_id].append(image_desc)
#     return mapping
#
# def clean_descriptions(descriptions):
#     # prepare translation table for removing punctuation
#     table = str.maketrans("", "", string.punctuation)
#     for key, desc_list in descriptions.items():
#         for i in range(len(desc_list)):
#             desc = desc_list[i]
#             # tokenize
#             desc = desc.split()
#             # convert to lower case
#             desc = [word.lower() for word in desc]
#             # remove punctuation from each token
#             desc = [w.translate(table) for w in desc]
#             # remove hanging 's' and 'a'
#             desc = [word for word in desc if len(word) > 1]
#             # remove tokens with numbers in them
#             desc = [word for word in desc if word.isalpha()]
#             # store as string
#             desc_list[i] = " ".join(desc)
#
# # convert the loaded descriptions into a vocabulary of words
# def to_vocabulary(descriptions):
#     # build a set of all words used across the description strings
#     all_desc = set()
#     for key in descriptions.keys():
#         [all_desc.update(d.split()) for d in descriptions[key]]
#     return all_desc
#
# # save descriptions to file, one per line
# def save_descriptions(descriptions, filename):
#     lines = list()
#     for key, desc_list in descriptions.items():
#         for desc in desc_list:
#             lines.append(key + " " + desc)
#     data = "\n".join(lines)
#     file = open(filename, "w")
#     file.write(data)
#     file.close()
#
# filename = "/kaggle/input/flickr-8k-images/Flickr8k/Flickr8k_text/Flickr8k.token.txt"
# # load descriptions
# doc = load_doc(filename)
# # parse descriptions
# descriptions = load_descriptions(doc)
# print("Loaded: %d " % len(descriptions))
# # clean descriptions
# clean_descriptions(descriptions)
# # summarize vocabulary
# vocabulary = to_vocabulary(descriptions)
# print("Vocabulary Size: %d" % len(vocabulary))
# # save to file
# save_descriptions(descriptions, "/kaggle/working/descriptions.txt")

# train and evaluate
def load_doc(filename):
    file = open(filename, "r")
    text = file.read()
    file.close()
    return text


def load_set(filename):
    doc = load_doc(filename)
    dataset = list()
    for line in doc.split("\n"):
        if len(line) < 1:
            continue
        identifier = line.split(".")[0]
        dataset.append(identifier)
    return set(dataset)


def load_clean_descriptions(filename, dataset):
    doc = load_doc(filename)
    descriptions = dict()
    for line in doc.split("\n"):
        tokens = line.split()
        image_id, image_desc = tokens[0], tokens[1:]
        if image_id in dataset:
            if image_id not in descriptions:
                descriptions[image_id] = list()
            # wrap each description with sequence start/end markers
            desc = "startseq " + " ".join(image_desc) + " endseq"
            descriptions[image_id].append(desc)
    return descriptions


def load_photo_features(filename, dataset):
    all_features = load(open(filename, "rb"))
    features = {k: all_features[k] for k in dataset}
    return features


def to_lines(descriptions):
    all_desc = list()
    for key in descriptions.keys():
        [all_desc.append(d) for d in descriptions[key]]
    return all_desc


def create_tokenizer(descriptions):
    lines = to_lines(descriptions)
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer


def max_length(descriptions):
    lines = to_lines(descriptions)
    return max(len(d.split()) for d in lines)


def word_for_id(integer, tokenizer):
    for word, index in tokenizer.word_index.items():
        if index == integer:
            return word
    return None


def generate_desc(model, tokenizer, photo, max_length):
    # generate a caption token by token, starting from the start marker
    in_text = "startseq"
    for i in range(max_length):
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        sequence = pad_sequences([sequence], maxlen=max_length)
        yhat = model.predict([photo, sequence], verbose=0)
        yhat = argmax(yhat)
        word = word_for_id(yhat, tokenizer)
        if word is None:
            break
        in_text += " " + word
        if word == "endseq":
            break
    return in_text


def create_sequences(tokenizer, max_length, desc_list, photo, vocab_size):
    X1, X2, y = list(), list(), list()
    for desc in desc_list:
        seq = tokenizer.texts_to_sequences([desc])[0]
        # split one caption into multiple (input words, next word) pairs
        for i in range(1, len(seq)):
            in_seq, out_seq = seq[:i], seq[i]
            in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
            out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
            X1.append(photo)
            X2.append(in_seq)
            y.append(out_seq)
    return array(X1), array(X2), array(y)


def define_model(vocab_size, max_length):
    # image feature branch
    inputs1 = Input(shape=(4096,))
    fe1 = Dropout(0.5)(inputs1)
    fe2 = Dense(256, activation="relu")(fe1)
    # text sequence branch
    inputs2 = Input(shape=(max_length,))
    se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
    se2 = Dropout(0.5)(se1)
    se3 = LSTM(256)(se2)
    # decoder: merge both branches and predict the next word
    decoder1 = keras.layers.add([fe2, se3])
    decoder2 = Dense(256, activation="relu")(decoder1)
    outputs = Dense(vocab_size, activation="softmax")(decoder2)
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss="categorical_crossentropy", optimizer="adam")
    model.summary()
    plot_model(model, to_file="/kaggle/working/model.png", show_shapes=True)
    return model


def data_generator(descriptions, photos, tokenizer, max_length, vocab_size):
    # progressive loading: yield one image's worth of sequences at a time
    while 1:
        for key, desc_list in descriptions.items():
            photo = photos[key][0]
            in_img, in_seq, out_word = create_sequences(
                tokenizer, max_length, desc_list, photo, vocab_size
            )
            yield [in_img, in_seq], out_word


filename = (
    "/kaggle/input/flickr-8k-images/Flickr8k/Flickr8k_text/Flickr_8k.trainImages.txt"
)
train = load_set(filename)
print("Dataset: %d" % len(train))
train_descriptions = load_clean_descriptions(
    "/kaggle/input/vrprojdata/descriptions.txt", train
)
print("Descriptions: train=%d" % len(train_descriptions))
train_features = load_photo_features("/kaggle/input/vrprojdata/features.pkl", train)
print("Photos: train=%d" % len(train_features))
tokenizer = create_tokenizer(train_descriptions)
vocab_size = len(tokenizer.word_index) + 1
print("Vocabulary Size: %d" % vocab_size)
# use a distinct variable name so the max_length() helper is not shadowed
max_seq_length = max_length(train_descriptions)
print("Description Length: %d" % max_seq_length)
model = define_model(vocab_size, max_seq_length)
epochs = 20
steps = len(train_descriptions)
for i in range(epochs):
    generator = data_generator(
        train_descriptions, train_features, tokenizer, max_seq_length, vocab_size
    )
    # fit_generator is deprecated/removed in TF 2.x; model.fit accepts generators
    model.fit(generator, epochs=1, steps_per_epoch=steps, verbose=1)
    model.save("model_" + str(i) + ".h5")


def evaluate_model(model, descriptions, photos, tokenizer, max_length):
    actual, predicted = list(), list()
    for key, desc_list in descriptions.items():
        yhat = generate_desc(model, tokenizer, photos[key], max_length)
        references = [d.split() for d in desc_list]
        actual.append(references)
        predicted.append(yhat.split())
    print("BLEU-1: %f" % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
    print("BLEU-2: %f" % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
    print("BLEU-3: %f" % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
    print(
        "BLEU-4: %f" % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))
    )
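# `evaluate_model` is defined above but never invoked. Running it on the held-out
# test split mirrors how the train split was loaded. This is a minimal sketch
# assuming the standard Flickr_8k.testImages.txt split file; the choice of
# "model_19.h5" (the last per-epoch checkpoint saved above) is illustrative.
filename = (
    "/kaggle/input/flickr-8k-images/Flickr8k/Flickr8k_text/Flickr_8k.testImages.txt"
)
test = load_set(filename)
print("Dataset: %d" % len(test))
test_descriptions = load_clean_descriptions(
    "/kaggle/input/vrprojdata/descriptions.txt", test
)
test_features = load_photo_features("/kaggle/input/vrprojdata/features.pkl", test)
model = load_model("model_19.h5")  # assumed: the final epoch's checkpoint
evaluate_model(model, test_descriptions, test_features, tokenizer, max_seq_length)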
<jupyter_start><jupyter_text>Facebook Hateful Meme Dataset
Kaggle dataset identifier: facebook-hateful-meme-dataset
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import random

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        # print(os.path.join(dirname, filename))
        continue

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_dir = "/kaggle/input/facebook-hateful-meme-dataset/data/"
img_path = os.path.join(data_dir, "img")  # image folder (unused below; image paths come from the jsonl files)
train_path = data_dir + "train.jsonl"
dev_path = data_dir + "dev.jsonl"
test_path = data_dir + "test.jsonl"

import torch, json

from PIL import Image


class HatefulMemesDataset(torch.utils.data.Dataset):
    def __init__(self, data_path):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)

    def __getitem__(self, index: int):
        # Load images on the fly.
        image = Image.open(
            os.path.join(self.data_dir, self.data[index]["img"])
        ).convert("RGB")
        text = self.data[index]["text"]
        label = self.data[index]["label"]
        return image, text, label

    def load_image_only(self, index: int):
        image = Image.open(
            os.path.join(self.data_dir, self.data[index]["img"])
        ).convert("RGB")
        return image

    def get_label(self, index: int):
        label = self.data[index]["label"]
        return label

    def get_test_item(self, index: int):
        # The test split has no labels, so load the image and text only.
        image = Image.open(
            os.path.join(self.data_dir, self.data[index]["img"])
        ).convert("RGB")
        text = self.data[index]["text"]
        return image, text

    def __len__(self):
        return len(self.data)


train_data = HatefulMemesDataset(train_path)
val_data = HatefulMemesDataset(dev_path)
test_data = HatefulMemesDataset(test_path)
print("Data size of training data: %d samples" % len(train_data))
print("Data size of validation data: %d samples" % len(val_data))
print("Data size of test data: %d samples" % len(test_data))

import torchvision
import torchvision.transforms.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

hateful_ids = [id for id in range(0, len(train_data)) if train_data.get_label(id) == 1]
print("There are %d hateful memes among %d" % (len(hateful_ids), len(train_data)))


def show_image_group(dataset, image_ids, n_images):
    # draw a random subset of ids and lay the images out on a grid
    image_ids = random.sample(image_ids, n_images)
    Transf = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor()])
    imgs = [Transf(dataset.load_image_only(id)) for id in image_ids]
    grid = torchvision.utils.make_grid(imgs, nrow=5)
    plt.figure(figsize=(20, 10))
    plt.axis("off")
    plt.imshow(F.to_pil_image(grid))


show_image_group(train_data, hateful_ids, 20)

# a plain Python list cannot be indexed with another list, so sample the ids directly
n_images = 20
sampled_hateful_ids = random.sample(hateful_ids, n_images)
sampled_hateful_ids[1]
len(hateful_ids)
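# Because `__getitem__` returns a PIL image plus a raw string, the default
# DataLoader collate cannot batch these samples. A custom `collate_fn` that
# tensorizes the images and keeps the texts as a plain list makes the dataset
# usable for training. This is a minimal sketch; the 224x224 resize and the
# `collate_fn`/`batch_transform` names are illustrative choices, not part of the
# original code.
from torch.utils.data import DataLoader

batch_transform = transforms.Compose(
    [transforms.Resize((224, 224)), transforms.ToTensor()]
)


def collate_fn(batch):
    images, texts, labels = zip(*batch)
    images = torch.stack([batch_transform(img) for img in images])
    labels = torch.tensor(labels, dtype=torch.long)
    return images, list(texts), labels


train_loader = DataLoader(
    train_data, batch_size=32, shuffle=True, collate_fn=collate_fn
)
images, texts, labels = next(iter(train_loader))
print(images.shape, len(texts), labels.shape)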
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/350/129350908.ipynb
facebook-hateful-meme-dataset
parthplc
[{"Id": 129350908, "ScriptId": 38459464, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1501471, "CreationDate": "05/13/2023 03:33:05", "VersionNumber": 3.0, "Title": "Hateful Memes - Usage - Random Sample", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 101.0, "LinesInsertedFromPrevious": 3.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 98.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185314625, "KernelVersionId": 129350908, "SourceDatasetVersionId": 1246182}]
[{"Id": 1246182, "DatasetId": 715500, "DatasourceVersionId": 1277976, "CreatorUserId": 2054977, "LicenseName": "Unknown", "CreationDate": "06/14/2020 15:26:33", "VersionNumber": 1.0, "Title": "Facebook Hateful Meme Dataset", "Slug": "facebook-hateful-meme-dataset", "Subtitle": "Facebook Hateful meme classification challenge on datadriven", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 715500, "CreatorUserId": 2054977, "OwnerUserId": 2054977.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1246182.0, "CurrentDatasourceVersionId": 1277976.0, "ForumId": 730283, "Type": 2, "CreationDate": "06/14/2020 15:26:33", "LastActivityDate": "06/14/2020", "TotalViews": 24514, "TotalDownloads": 3035, "TotalVotes": 66, "TotalKernels": 24}]
[{"Id": 2054977, "UserName": "parthplc", "DisplayName": "Parth Chokhra", "RegisterDate": "07/09/2018", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: # print(os.path.join(dirname, filename)) continue # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data_dir = "/kaggle/input/facebook-hateful-meme-dataset/data/" img_path = data_dir + "data_dir" train_path = data_dir + "train.jsonl" dev_path = data_dir + "dev.jsonl" test_path = data_dir + "test.jsonl" import torch, json, os from PIL import Image class HatefulMemesDataset(torch.utils.data.Dataset): def __init__(self, data_path): self.data = [json.loads(l) for l in open(data_path)] self.data_dir = os.path.dirname(data_path) def __getitem__(self, index: int): # Load images on the fly. image = Image.open( os.path.join(self.data_dir, self.data[index]["img"]) ).convert("RGB") text = self.data[index]["text"] label = self.data[index]["label"] return image, text, label def load_image_only(self, index: int): image = Image.open( os.path.join(self.data_dir, self.data[index]["img"]) ).convert("RGB") return image def get_label(self, index: int): label = self.data[index]["label"] return label def get_test_item(self, index: int): # Load images on the fly. image = Image.open( os.path.join(self.data_dir, self.data[index]["img"]) ).convert("RGB") text = self.data[index]["text"] return image, text def __len__(self): return len(self.data) train_data = HatefulMemesDataset(train_path) val_data = HatefulMemesDataset(dev_path) test_data = HatefulMemesDataset(test_path) print("Data size of training data: %d samples" % len(train_data)) print("Data size of validation data: %d samples" % len(val_data)) print("Data size of test data: %d samples" % len(test_data)) import torchvision import torchvision.transforms.functional as F import torchvision.transforms as transforms import matplotlib.pyplot as plt hateful_ids = [id for id in range(0, len(train_data)) if train_data.get_label(id) == 1] print("There are %d hateful memes among %d" % (len(hateful_ids), len(train_data))) def show_image_group(dataset, image_ids, n_images): image_ids = random.sample(image_ids, n_images) # image_ids = image_ids[n_image_inds] Transf = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor()]) imgs = [Transf(dataset.load_image_only(id)) for id in image_ids] grid = torchvision.utils.make_grid(imgs, nrow=5) plt.figure(figsize=(20, 10)) plt.axis(False) plt.imshow(F.to_pil_image(grid)) import random show_image_group(train_data, hateful_ids, 20) n_images = 20 n_image_inds = random.sample(hateful_ids, n_images) hateful_ids = hateful_ids[n_image_inds] hateful_ids[n_image_inds[1]] n_image_inds len(hateful_ids)
false
0
1,056
0
1,086
1,056
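A minimal batching sketch for the HatefulMemesDataset above; the collate function name and batch size are illustrative assumptions, not part of the original notebook:

import torch
from torch.utils.data import DataLoader

def collate_memes(batch):
    # PIL images and raw strings cannot be stacked by the default collate,
    # so keep them as Python lists and tensorize only the integer labels.
    images, texts, labels = zip(*batch)
    return list(images), list(texts), torch.tensor(labels)

train_loader = DataLoader(train_data, batch_size=32, shuffle=True, collate_fn=collate_memes)
images, texts, labels = next(iter(train_loader))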
129350548
<jupyter_start><jupyter_text>Walmart Data Analysis and Forcasting A retail store that has multiple outlets across the country is facing issues in managing the inventory - to match the demand with respect to supply. You are a data scientist, who has to come up with useful insights using the data and make prediction models to forecast the sales for X number of months/years. Kaggle dataset identifier: walmart-data-analysis-and-forcasting <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # data viz import matplotlib.pyplot as plt from matplotlib import rcParams import seaborn as sns df = pd.read_csv( "/kaggle/input/walmart-data-analysis-and-forcasting/Walmart Data Analysis and Forcasting.csv" ) df.head(5) df.describe() df.shape df.isna().sum() df.info() # We do not have any null values in our dataset # Top 5 stores with the highest total sales df.groupby("Store").agg({"Weekly_Sales": "sum"}).sort_values( "Weekly_Sales", ascending=False ).head(5) # Stores with the least total sales df.groupby("Store").agg({"Weekly_Sales": "sum"}).sort_values( "Weekly_Sales", ascending=True ).head(5) # Top weeks with the highest sales df.sort_values("Weekly_Sales", ascending=False).head(5) # Weeks with the lowest sales df.sort_values("Weekly_Sales", ascending=True).head(5) # Average weekly sales in holiday vs non-holiday weeks df.groupby("Holiday_Flag").agg({"Weekly_Sales": "mean"}) # Temperature and Weekly Sales - Scatter plot plt.scatter(df["Temperature"], df["Weekly_Sales"], alpha=0.5) plt.xlabel("Temperature") plt.ylabel("Weekly Sales") plt.title("Scatter plot of Weekly Sales vs Temperature") # From the scatter plot we can see a pattern: customers seem to treat 20-60 degrees as the optimal temperature range to shop, and we observe more high weekly-sales data points in that range # Weekly Sales trend line # changing the date data type df["Date"] = pd.to_datetime(df["Date"], format="%d-%m-%Y") # aggregate all the weekly sales WeeklySales = df.groupby(["Date"])["Weekly_Sales"].sum().reset_index() plt.figure(figsize=(12, 8)) plt.plot(WeeklySales["Date"], WeeklySales["Weekly_Sales"], color="blue") plt.xlabel("Date") plt.ylabel("Weekly Sales") plt.title("Weekly Sales trend line") plt.show() # We can see a repeating trend in sales around the holiday season in December; sales peak during the season, dip after New Year's, and pick back up after a few weeks - we observe a seasonal trend in weekly sales # Distribution of weekly sales sns.histplot(df["Weekly_Sales"], bins=50, color="blue", edgecolor="black", kde=True) plt.title("Distribution of Weekly Sales") plt.show() corrmat = df.corr() hm = sns.heatmap( corrmat, cbar=True, annot=True, square=True, fmt=".2f", annot_kws={"size": 10}, yticklabels=df.columns, xticklabels=df.columns, cmap="Spectral_r", ) plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/350/129350548.ipynb
walmart-data-analysis-and-forcasting
asahu40
[{"Id": 129350548, "ScriptId": 38368943, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14977727, "CreationDate": "05/13/2023 03:27:30", "VersionNumber": 1.0, "Title": "Walmart Sales Data EDA", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 99.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185314153, "KernelVersionId": 129350548, "SourceDatasetVersionId": 5526698}]
[{"Id": 5526698, "DatasetId": 3186183, "DatasourceVersionId": 5601262, "CreatorUserId": 4711744, "LicenseName": "CC0: Public Domain", "CreationDate": "04/26/2023 07:07:03", "VersionNumber": 1.0, "Title": "Walmart Data Analysis and Forcasting", "Slug": "walmart-data-analysis-and-forcasting", "Subtitle": "Walmart Sales Data Analysis and Forcasting for EDA and Machine Learning", "Description": "A retail store that has multiple outlets across the country are facing issues in managing the\ninventory - to match the demand with respect to supply. You are a data scientist, who has to\ncome up with useful insights using the data and make prediction models to forecast the sales for\nX number of months/years.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3186183, "CreatorUserId": 4711744, "OwnerUserId": 4711744.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5526698.0, "CurrentDatasourceVersionId": 5601262.0, "ForumId": 3250612, "Type": 2, "CreationDate": "04/26/2023 07:07:03", "LastActivityDate": "04/26/2023", "TotalViews": 22574, "TotalDownloads": 4018, "TotalVotes": 75, "TotalKernels": 14}]
[{"Id": 4711744, "UserName": "asahu40", "DisplayName": "Amit Kumar Sahu", "RegisterDate": "03/21/2020", "PerformanceTier": 0}]
false
1
922
0
1,026
922
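The December seasonality observed above can be made explicit with a classical decomposition; a minimal sketch assuming the WeeklySales frame and the matplotlib import from the notebook, with the 52-week period being an assumption for yearly seasonality in weekly data:

from statsmodels.tsa.seasonal import seasonal_decompose

series = WeeklySales.set_index("Date")["Weekly_Sales"]
# weekly observations with a yearly cycle -> period of roughly 52
decomposition = seasonal_decompose(series, model="additive", period=52)
decomposition.plot()
plt.show()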
129350706
import numpy as np import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score path = "/Users/stephanielin/Desktop/edm_cup_2023/" def load_data(filenames): dataframes = {} for name, file in filenames.items(): dataframes[name] = pd.read_csv(file) return dataframes def print_shape(dataframes): for df_name, df in dataframes.items(): print(f"{df_name} shape: {df.shape}") def unique_values(dataframes): for df_name, df in dataframes.items(): print(f"\n{df_name} unique values:") for col in df.select_dtypes(include="object").columns: print(f"{col}: {len(df[col].unique())} unique values") def check_missing_values(dataframes): for df_name, df in dataframes.items(): print(f"\n{df_name} missing values:") print(df.isnull().sum()) def display_head(dataframes): for df_name, df in dataframes.items(): print(f"\n{df_name} head:") display(df.head()) filenames = { "action_logs": path + "action_logs.csv", "assignment_details": path + "assignment_details.csv", "assignment_relationships": path + "assignment_relationships.csv", "evaluation_unit_test_scores": path + "evaluation_unit_test_scores.csv", "explanation_details": path + "explanation_details.csv", "hint_details": path + "hint_details.csv", "problem_details": path + "problem_details.csv", "sequence_details": path + "sequence_details.csv", "sequence_relationships": path + "sequence_relationships.csv", "training_unit_test_scores": path + "training_unit_test_scores.csv", } dataframes = load_data(filenames) print_shape(dataframes) check_missing_values(dataframes) unique_values(dataframes)
########################################## Data Cleaning ########################################## tuts = dataframes["training_unit_test_scores"] euts = dataframes["evaluation_unit_test_scores"] ar = dataframes["assignment_relationships"] al = dataframes["action_logs"] ad = dataframes["assignment_details"] problemd = dataframes["problem_details"] ed = dataframes["explanation_details"] hd = dataframes["hint_details"] sd = dataframes["sequence_details"] sr = dataframes["sequence_relationships"] # assignment_details.csv # a null assignment_end_time indicates either the question was held out for prediction or the student did not finish # assignment_due_date: If a teacher did not specify a due date, this column will be blank. ad["assignment_due_date"] = ad["assignment_due_date"].fillna("NA") # problem_details.csv # This file contains one row for every problem referenced in the dataset, # except for some problems in the action logs, which have been deleted from the database. # These problems likely had errors during their original transcription into ASSISTments that were corrected, # but no record of the original problems was kept. problemd["problem_skill_code"] = problemd["problem_skill_code"].fillna("NA") problemd["problem_contains_image"] = problemd["problem_contains_image"].fillna(0) problemd["problem_contains_equation"] = problemd["problem_contains_equation"].fillna(0) problemd["problem_contains_video"] = problemd["problem_contains_video"].fillna(0) problemd["problem_skill_description"] = problemd["problem_skill_description"].fillna( "NA" ) # sequence_details # This file contains at least one row for each sequence present in the dataset and contains all the information unique to each sequence. # Some sequences have duplicate rows because they are present in multiple locations within units. Whether that is intentional is unclear. # sequence_folder_path_level_4 & sequence_folder_path_level_5: If there is no folder at this path depth, this column is blank. sd["sequence_folder_path_level_4"] = sd["sequence_folder_path_level_4"].fillna("NA") sd["sequence_folder_path_level_5"] = sd["sequence_folder_path_level_5"].fillna("NA")
########################################## Exploratory Analysis ########################################## # data viz import matplotlib.pyplot as plt from matplotlib import rcParams import seaborn as sns # explore problem-related variables problem_df = tuts.merge( problemd, how="left", left_on="problem_id", right_on="problem_id" ) problem_df.head() # continuous variables problem_df.corr() # categorical variables: only problem_type is usable here; problem_skill_code and problem_skill_description have too many unique values sns.countplot(x="score", hue="problem_type", data=problem_df) plt.title("Countplot of score by problem_type") plt.show()
########################################## Feature Engineering ########################################## # too many unique values for problem_skill_code # If a skill code has a dash in it, that specifies a more detailed sub-skill beyond what the common core provides. problemd["sub_skill_involved"] = ( problemd["problem_skill_code"].str.contains("-").astype(int) ) # create dummy variables for problem_type problem_type = pd.get_dummies(problemd["problem_type"], drop_first=True) problemd.drop("problem_type", axis=1, inplace=True) problemd = pd.concat([problemd, problem_type], axis=1) problem_type_columns = problemd.iloc[:, -9:].columns.tolist() problem_type_columns.append("problem_id") problemd.head(10) # If the student never completed the assignment, this assignment_end_time will be blank. # create a notfinish column to indicate whether the assignment is finished or not ad["notfinish"] = ad["assignment_end_time"].isnull().astype(int) # get the sum of unfinished in-unit assignments for each unit test assignment ad_ar = ar.merge( ad, how="left", left_on="in_unit_assignment_log_id", right_on="assignment_log_id" ) ad_ar.head() assignment_total = ad_ar[["unit_test_assignment_log_id", "in_unit_assignment_log_id"]] assignment_total = ( assignment_total.groupby("unit_test_assignment_log_id")["in_unit_assignment_log_id"] .nunique() .rename("Total_Assignment_Count") ) assignment_total.to_frame() assignment_total.head() notfinish = ad_ar[ ["unit_test_assignment_log_id", "in_unit_assignment_log_id", "notfinish"] ] notfinish = notfinish.groupby("unit_test_assignment_log_id").sum() notfinish.head() notfinish = notfinish.merge( assignment_total, how="left", left_index=True, right_index=True ) # add a column to calculate the percent of unfinished in-unit assignments notfinish["notfinish_percent"] = ( notfinish["notfinish"] / notfinish["Total_Assignment_Count"] ) notfinish["notfinish_percent"] = notfinish["notfinish_percent"].round(4) notfinish.head() # Associate the action logs for each in-unit assignment with their unit test assignment al_ar = ar.merge( al, how="left", left_on="in_unit_assignment_log_id", right_on="assignment_log_id" ) action_df = al_ar[["unit_test_assignment_log_id", "action"]] # Get the total number of times each action was taken within the in-unit assignments corresponding to each unit test assignment action_df = pd.get_dummies(action_df, columns=["action"]) action_df = action_df.groupby("unit_test_assignment_log_id").sum() # Create a feature for the total action count, then scale it between 0 and 1 action_count = action_df.sum(axis=1) # Convert the individual action counts into a fraction of total actions taken action_df = action_df.div(action_count, axis=0) # Add the scaled total action count to the dataframe action_df["action_count"] = (action_count - action_count.min()) / ( action_count.max() - action_count.min() ) # Associate the tutoring for each in-unit assignment with their unit test assignment tutor_df = al_ar[["unit_test_assignment_log_id", "available_core_tutoring"]] # Get the total number of times each available_core_tutoring was taken within the in-unit assignments corresponding to each unit test assignment tutor_df = pd.get_dummies(tutor_df, columns=["available_core_tutoring"]) tutor_df = tutor_df.groupby("unit_test_assignment_log_id").sum() # Create a feature for the total tutoring count, then scale it between 0 and 1 tutor_count = tutor_df.sum(axis=1) # Convert the individual tutoring counts into a fraction of total tutoring tutor_df = tutor_df.div(tutor_count, axis=0)
########################################## Training Set ########################################## tuts = dataframes["training_unit_test_scores"] # Merge action count features with the training unit test scores tuts = tuts.merge(action_df, how="left", left_on="assignment_log_id", right_index=True) # Merge sub_skill_involved and problem_multipart_position features with the training unit test scores # selected_cols = ['problem_id', 'problem_skill_code', 'problem_multipart_position' ] tuts = tuts.merge( problemd[["problem_id", "sub_skill_involved", "problem_multipart_position"]], how="left", left_on="problem_id", right_on="problem_id", ) # merge problem type dummies tuts = tuts.merge( problemd[problem_type_columns], how="left", left_on="problem_id", right_on="problem_id", ) # Merge available_core_tutoring count features with the training unit test scores tuts = tuts.merge(tutor_df, how="left", left_on="assignment_log_id", right_index=True) # merge notfinish percent tuts = tuts.merge( notfinish["notfinish_percent"], how="left", left_on="assignment_log_id", right_index=True, ) tuts.head(10) tuts = tuts.fillna(0)
########################################## Test Set ########################################## euts = dataframes["evaluation_unit_test_scores"] # Merge action count features with the evaluation unit test scores euts = euts.merge(action_df, how="left", left_on="assignment_log_id", right_index=True) # Merge sub_skill_involved and problem_multipart_position features with the evaluation unit test scores euts = euts.merge( problemd[["problem_id", "sub_skill_involved", "problem_multipart_position"]], how="left", left_on="problem_id", right_on="problem_id", ) # merge problem type dummies euts = euts.merge( problemd[problem_type_columns], how="left", left_on="problem_id", right_on="problem_id", ) # Merge available_core_tutoring count features with the evaluation unit test scores euts = euts.merge(tutor_df, how="left", left_on="assignment_log_id", right_index=True) # merge notfinish percent euts = euts.merge( notfinish["notfinish_percent"], how="left", left_on="assignment_log_id", right_index=True, ) # euts = euts.merge(attempts['average_attempts'], how='left', left_on='assignment_log_id', right_index=True) euts.head(10) euts = euts.fillna(0)
########################################## Model ########################################## # Collect the input and target columns for the regression input_cols = [c for c in tuts.columns] input_cols.remove("assignment_log_id") input_cols.remove("problem_id") input_cols.remove("score") target_col = "score" print(input_cols) # split into training and test sets X = tuts[input_cols] y = tuts["score"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Initialize a logistic regression lr = LogisticRegression(max_iter=1000) # Fit the regression on the training split lr = lr.fit(X_train, y_train) # y_pred_proba = rf_classifier.predict_proba(X_test)[:, 1] y_pred_proba = lr.predict_proba(X_test)[:, 1] auc = roc_auc_score(y_test, y_pred_proba) print("AUC:", auc) # For the final prediction, use all of tuts for training to predict euts # Initialize a logistic regression lr = LogisticRegression(max_iter=1000) # Fit the regression on all the training data lr = lr.fit(tuts[input_cols], tuts[target_col]) # Predict the score for each evaluation problem euts[target_col] = lr.predict_proba(euts[input_cols])[:, 1] euts[["id", "score"]].to_csv("example_submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/350/129350706.ipynb
null
null
[{"Id": 129350706, "ScriptId": 38459891, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12064854, "CreationDate": "05/13/2023 03:30:12", "VersionNumber": 1.0, "Title": "4051_Final_Group7", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 282.0, "LinesInsertedFromPrevious": 282.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,437
0
3,437
3,437
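The notebook imports cross_val_score but never calls it; a minimal sketch of how it could replace the single train/test split above for a more stable AUC estimate (assuming the X and y built in the notebook):

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

cv_auc = cross_val_score(
    LogisticRegression(max_iter=1000), X, y, cv=5, scoring="roc_auc"
)
print("Mean cross-validated AUC: %.4f (std %.4f)" % (cv_auc.mean(), cv_auc.std()))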
129683499
import pandas as pd df = pd.read_csv( "/kaggle/input/innopolis-global-ai-challenge-2023-ai4med/test_sample.csv" ) df["ID"] = df["ID"].apply(lambda x: x.split("_")) df["Image Number"] = df["ID"].apply(lambda x: x[0]) # cast the pixel coordinates to int so that min()/max() below compare numerically rather than lexicographically df["pixel_x"] = df["ID"].apply(lambda x: int(x[1])) df["pixel_y"] = df["ID"].apply(lambda x: int(x[2])) del df["ID"] df uni = df["pixel_x"].unique() min(uni), max(uni) uni = df["pixel_y"].unique() min(uni), max(uni)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683499.ipynb
null
null
[{"Id": 129683499, "ScriptId": 38564576, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3281392, "CreationDate": "05/15/2023 18:06:22", "VersionNumber": 1.0, "Title": "Test_Sample", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 15.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
180
0
180
180
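A more idiomatic sketch of the same ID split using str.split(expand=True), which replaces the three apply calls with one vectorized operation; it assumes the same three-part underscore-separated ID format as the notebook:

import pandas as pd

df = pd.read_csv(
    "/kaggle/input/innopolis-global-ai-challenge-2023-ai4med/test_sample.csv"
)
parts = df["ID"].str.split("_", expand=True)
parts.columns = ["Image Number", "pixel_x", "pixel_y"]
df = df.drop(columns="ID").join(parts.astype({"pixel_x": int, "pixel_y": int}))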
129683495
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns testdata = pd.read_csv("/kaggle/input/diamond-price-prediction/test.csv") dataset = pd.read_csv("/kaggle/input/diamond-price-prediction/train.csv") dataset.head() testdata.head() print(dataset.columns) print(testdata.columns) dataset.drop(["Id"], axis=1, inplace=True) Id = testdata["Id"] testdata.drop(["Id"], axis=1, inplace=True) dataset.head(5) dataset.shape # we have 43152 rows and 10 columns dataset.index # row index of the dataset dataset.info() # no null data; dtypes: 6 float, 1 integer, and 3 object cutquality = dataset.value_counts("cut") cutquality cutquality = dataset.value_counts("cut") label = cutquality.index index_array = label.to_numpy() cutscount = [] for cutcount in cutquality: cutscount.append(cutcount) plt.pie(cutscount, labels=index_array, autopct="%1.1f%%") plt.title("cut quality of diamond") plt.show() colorlist = dataset.value_counts("color") colorlist label = colorlist.index index_array = label.to_numpy() colorscount = [] for colorcount in colorlist: colorscount.append(colorcount) plt.pie(colorscount, labels=index_array, autopct="%1.1f%%") plt.title("color of diamond") plt.show() clarity = dataset.value_counts("clarity") clarity label = clarity.index index_array = label.to_numpy() claritylist = [] for count in clarity: claritylist.append(count) plt.pie(claritylist, labels=index_array, autopct="%1.1f%%") plt.title("clarity: a measurement of how clear the diamond is") plt.show() dataset.describe() dataset = dataset.drop(dataset[dataset["x"] == 0].index) dataset = dataset.drop(dataset[dataset["y"] == 0].index) dataset = dataset.drop(dataset[dataset["z"] == 0].index) dataset.describe() # rows with zero dimensions removed dataset.shape dataset["price"].plot.hist() plt.figure(figsize=(15, 8)) sns.boxplot(y="price", x="cut", data=dataset) plt.figure(figsize=(15, 8)) sns.boxplot(y="price", x="color", data=dataset) plt.figure(figsize=(15, 8)) sns.boxplot(y="price", x="clarity", data=dataset) plt.figure(figsize=(15, 8)) sns.boxplot(x="color", y="price", hue="cut", data=dataset) plt.figure(figsize=(15, 8)) sns.boxplot(x="clarity", y="price", hue="cut", data=dataset) sns.regplot(x="price", y="x", data=dataset, fit_reg=True) sns.regplot(x="price", y="y", data=dataset, fit_reg=True) sns.regplot(x="price", y="z", data=dataset, fit_reg=True) sns.regplot(x="price", y="depth", data=dataset, fit_reg=True) sns.regplot(x="price", y="table", data=dataset, fit_reg=True) dataset = dataset[(dataset["depth"] < 75) & (dataset["depth"] > 45)] dataset = dataset[(dataset["table"] < 80) & (dataset["table"] > 40)] dataset = dataset[(dataset["x"] < 30)] dataset = dataset[(dataset["y"] < 30)] dataset = dataset[(dataset["z"] < 30) & (dataset["z"] > 2)] dataset.shape sns.pairplot(dataset) plt.figure(figsize=(15, 8)) sns.violinplot(x="cut", y="price", data=dataset, scale="count") plt.figure(figsize=(15, 8)) sns.violinplot(x="color", y="price", data=dataset, scale="count") plt.figure(figsize=(15, 8)) sns.violinplot(x="clarity", y="price", data=dataset, scale="count") dataset.isna().sum() testdata.isna().sum()
def cut_quality(argument): match argument: case "Ideal": return 5 case "Premium": return 4 case "Very Good": return 3 case "Good": return 2 case _: return 1 def color_grade(argument): match argument: case "D": return 6 case "E": return 5 case "F": return 4 case "G": return 3 case "H": return 2 case _: return 1 def clarity_grade(argument): match argument: case "IF": return 8 case "VVS1": return 7 case "VVS2": return 6 case "VS1": return 5 case "VS2": return 4 case "SI1": return 3 case "SI2": return 2 case _: return 1 label_data = dataset.copy() label_data["cut"] = label_data["cut"].apply(cut_quality) label_data["color"] = label_data["color"].apply(color_grade) label_data["clarity"] = label_data["clarity"].apply(clarity_grade) label_data.head() corr = label_data.corr() sns.set(style="darkgrid") plt.figure(figsize=(15, 8)) sns.heatmap(corr, annot=True) corr_matrix = dataset.corr() corr_matrix["price"].sort_values(ascending=False) print(dataset["color"].unique()) print(dataset["cut"].unique()) print(dataset["clarity"].unique()) print(testdata["color"].unique()) print(testdata["cut"].unique()) print(testdata["clarity"].unique()) testdata label_test_data = testdata.copy() label_test_data["cut"] = label_test_data["cut"].apply(cut_quality) label_test_data["color"] = label_test_data["color"].apply(color_grade) label_test_data["clarity"] = label_test_data["clarity"].apply(clarity_grade) label_test_data.head() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error import joblib lin_reg = LinearRegression() tree_reg = DecisionTreeRegressor() forest_reg = RandomForestRegressor() X = label_data.drop(["price"], axis=1) y = label_data["price"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.10, random_state=20 ) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) print(label_test_data.shape)
tree_reg.fit(X_train, y_train) print( "Decision Tree Regressor train accuracy score is :", tree_reg.score(X_train, y_train), ) predictions = tree_reg.predict(X_test) predictions_test = tree_reg.predict(label_test_data) print( "Decision Tree Regressor test accuracy score is :", tree_reg.score(X_test, y_test) ) testdata["price"] = predictions_test testdata.to_csv("testDecisionTreeRegressor.csv", index=False) testdata print("R2_Score is: ", r2_score(y_test, predictions).round(4)) print("Root_mean_squared_error is: ", np.sqrt(mean_squared_error(y_test, predictions))) print("mean_absolute_error is: ", mean_absolute_error(y_test, predictions).round(4)) # save decision_tree_regressor_model joblib.dump(tree_reg, "decision_tree_regressor_model.joblib") joblib.load("decision_tree_regressor_model.joblib") depths = [i for i in range(1, 30)] train_score = [] test_score = [] for i in depths: DTR = DecisionTreeRegressor(max_depth=i) DTR.fit(X_train, y_train) train_score.append(DTR.score(X_train, y_train)) test_score.append(DTR.score(X_test, y_test)) plt.plot(depths, train_score, label="Training") plt.plot(depths, test_score, label="Testing") plt.xlabel("Max_depth") plt.ylabel("Train, Test scores") plt.title("Decision Tree Regressor") plt.legend() plt.show() lin_reg.fit(X_train, y_train) print("Linear Regression train accuracy score is :", lin_reg.score(X_train, y_train)) predictions = lin_reg.predict(X_test) predictions_test = lin_reg.predict(label_test_data) print("Linear Regression test accuracy score is :", lin_reg.score(X_test, y_test)) testdata["price"] = predictions_test testdata.to_csv("testLinearRegression.csv", index=False) testdata print("R2_Score is: ", r2_score(y_test, predictions).round(4)) print("Root_mean_squared_error is: ", np.sqrt(mean_squared_error(y_test, predictions))) print("mean_absolute_error is: ", mean_absolute_error(y_test, predictions).round(4)) # save linear_regression_model joblib.dump(lin_reg, "linear_regression_model.joblib") joblib.load("linear_regression_model.joblib") # linear regression has no depth parameter, so the scores below are constant across this loop depths = [i for i in range(1, 30)] train_score = [] test_score = [] for i in depths: lin_reg = LinearRegression() lin_reg.fit(X_train, y_train) train_score.append(lin_reg.score(X_train, y_train)) test_score.append(lin_reg.score(X_test, y_test)) plt.plot(depths, train_score, label="Training") plt.plot(depths, test_score, label="Testing") plt.xlabel("Max_depth") plt.ylabel("Train, Test scores") plt.title("Linear Regression") plt.legend() plt.show() forest_reg.fit(X_train, y_train) print( "Random Forest Regressor train accuracy score is:", forest_reg.score(X_train, y_train), ) predictions = forest_reg.predict(X_test) predictions_test = forest_reg.predict(label_test_data) print( "Random Forest Regressor test accuracy score is:", forest_reg.score(X_test, y_test) ) testdata["price"] = predictions_test testdata.to_csv("testRandomForestRegressor.csv", index=False) testdata # save random_forest_model joblib.dump(forest_reg, "random_forest_model.joblib") joblib.load("random_forest_model.joblib") depths = [i for i in range(1, 20)] train_score = [] test_score = [] for i in depths: forest_reg = RandomForestRegressor(max_depth=i) forest_reg.fit(X_train, y_train) train_score.append(forest_reg.score(X_train, y_train)) test_score.append(forest_reg.score(X_test, y_test)) plt.plot(depths, train_score, label="Training") plt.plot(depths, test_score, label="Testing") plt.xlabel("Max_depth") plt.ylabel("Train, Test scores") plt.title("Random Forest Regressor") plt.legend() plt.show() print("R2_Score is: ", r2_score(y_test, predictions).round(4)) print("Root_mean_squared_error is: ", np.sqrt(mean_squared_error(y_test, predictions))) print("mean_absolute_error is: ", mean_absolute_error(y_test, predictions).round(4)) testdata = {"Id": Id, "price": predictions_test} sub = pd.DataFrame(data=testdata) sub.to_csv("sub.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683495.ipynb
null
null
[{"Id": 129683495, "ScriptId": 38474617, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13604219, "CreationDate": "05/15/2023 18:06:19", "VersionNumber": 1.0, "Title": "My Team", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 372.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 372.0, "LinesInsertedFromFork": 0.0, "LinesDeletedFromFork": 0.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 372.0, "TotalVotes": 1}]
null
null
null
null
false
0
3,361
1
3,361
3,361
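The match/case encoders above require Python 3.10 or newer; a minimal dict-based equivalent (same grades, same fallback of 1 for unseen values) that works on any recent Python, assuming the dataset frame from the notebook:

cut_map = {"Ideal": 5, "Premium": 4, "Very Good": 3, "Good": 2}
color_map = {"D": 6, "E": 5, "F": 4, "G": 3, "H": 2}
clarity_map = {"IF": 8, "VVS1": 7, "VVS2": 6, "VS1": 5, "VS2": 4, "SI1": 3, "SI2": 2}

label_data = dataset.copy()
label_data["cut"] = label_data["cut"].map(cut_map).fillna(1).astype(int)
label_data["color"] = label_data["color"].map(color_map).fillna(1).astype(int)
label_data["clarity"] = label_data["clarity"].map(clarity_map).fillna(1).astype(int)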
129683212
<jupyter_start><jupyter_text>World Men Volleyball Data: Players,Teams, 13 more Dive into the heart of the global volleyball universe with this comprehensive dataset! Spanning players, teams, matches, transfers, awards, and more, this dataset offers an unparalleled look into professional volleyball. From the strength behind the scenes to the stars of the show, explore the profiles of bodybuilders, physiotherapists, statisticians, and players who make each game possible. Follow the trajectories of players as they move between teams and achieve recognition for their skills. Discover the leaders who guide each team to success, including coaches, presidents, team managers, and sports directors. Go beyond the court to explore the broader context of the volleyball world. Learn about the countries participating in this global sport and the stadiums hosting electrifying matches. Study the intricate details of each game, down to the number of sets won by each team. Whether you're a volleyball enthusiast, a data scientist looking for a unique dataset to analyze, or simply curious about the inner workings of a global sport, this dataset provides a wealth of information to explore and investigate. Welcome to the world of volleyball! Kaggle dataset identifier: world-volleyball-data-players-teams-and-more <jupyter_script># Import necessary libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # Load the matches dataset matches = pd.read_csv( "../input/world-volleyball-data-players-teams-and-more/matches.csv" ) # Replace with your actual file path # Load the teams dataset teams = pd.read_csv( "../input/world-volleyball-data-players-teams-and-more/teams.csv" ) # Replace with your actual file path # Merge matches and teams datasets matches = matches.merge(teams, left_on="first_team_id", right_on="team_id", how="left") matches = matches.rename(columns={"team_name": "first_team_name"}).drop( "team_id", axis=1 ) matches = matches.merge(teams, left_on="second_team_id", right_on="team_id", how="left") matches = matches.rename(columns={"team_name": "second_team_name"}).drop( "team_id", axis=1 ) # Now, let's try to find the team with the most wins # For this, we will first create a new column 'winner' matches["winner"] = matches.apply( lambda row: row["first_team_name"] if row["n_set_team1"] > row["n_set_team2"] else row["second_team_name"], axis=1, ) # Count the number of wins by each team team_wins = matches["winner"].value_counts() # Display the team with the most wins team_wins.idxmax() # Import libraries import matplotlib.pyplot as plt import seaborn as sns from matplotlib.colors import LinearSegmentedColormap # Load libraries for date handling from datetime import datetime # Convert 'date' column to datetime matches["date"] = pd.to_datetime(matches["date"]) # Extract year from 'date' and create a new column 'year' matches["year"] = matches["date"].dt.year # Count the number of matches per year matches_per_year = matches["year"].value_counts().sort_index() # Plot number of matches per year plt.figure(figsize=(10, 6)) sns.lineplot( x=matches_per_year.index, y=matches_per_year.values, marker="o", sort=False, color="crimson", linewidth=2.5, ) plt.title("Number of Matches per Year", fontsize=20) plt.xlabel("Year", fontsize=15) plt.ylabel("Number of Matches", fontsize=15) plt.grid(color="gray", linestyle="--", linewidth=0.5) plt.show() # Create a gradient color mapping based on number of wins colors = plt.cm.coolwarm(np.linspace(0, 1, len(team_wins[:10]))) # Plot 
the top 10 teams with the most wins plt.figure(figsize=(10, 6)) team_wins[:10].plot(kind="barh", color=colors, edgecolor="black") plt.title("Top 10 Teams with Most Wins", fontsize=20) plt.xlabel("Number of Wins", fontsize=15) plt.ylabel("Team", fontsize=15) plt.gca().invert_yaxis() # invert y-axis to have the team with the most wins at the top plt.show() # Import necessary libraries import numpy as np # Calculate the total number of sets for each match matches["total_sets"] = matches["n_set_team1"] + matches["n_set_team2"] # Plot the distribution of total sets per match plt.figure(figsize=(10, 6)) sns.countplot(x="total_sets", data=matches, palette="viridis", edgecolor="black") plt.title("Distribution of Total Sets per Match", fontsize=20) plt.xlabel("Total Sets", fontsize=15) plt.ylabel("Number of Matches", fontsize=15) plt.grid(color="gray", linestyle="--", linewidth=0.5) plt.show() # Count the number of matches per league matches_per_league = matches["league"].value_counts() # Get the top 10 leagues with the most matches top_leagues = matches_per_league.nlargest(10) # Define the color palette colors = sns.color_palette("viridis", len(top_leagues)) # Plot the number of matches for the top 10 leagues plt.figure(figsize=(10, 6)) top_leagues.plot(kind="barh", color=colors) plt.title("Top 10 Leagues with Most Matches", fontsize=20) plt.xlabel("Number of Matches", fontsize=15) plt.ylabel("League", fontsize=15) plt.gca().invert_yaxis() # invert y-axis to have the league with the most matches at the top plt.show() # Extract day of week from 'date' and create a new column 'day_of_week' matches["day_of_week"] = matches["date"].dt.day_name() # Order of days for plotting order = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] # Count the number of matches per day of the week matches_per_day = matches["day_of_week"].value_counts().loc[order] # Plot the number of matches per day of the week plt.figure(figsize=(10, 6)) matches_per_day.plot(kind="bar", color="purple", edgecolor="black") plt.title("Number of Matches per Day of the Week", fontsize=20) plt.xlabel("Day of the Week", fontsize=15) plt.ylabel("Number of Matches", fontsize=15) plt.xticks(rotation=45) plt.show()
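# ### Aside: a vectorized "winner" column
# The row-wise `apply` above is easy to read but slow on large match tables.
# A sketch of a vectorized alternative, assuming the same merged `matches`
# frame and column names as above; it should reproduce the "winner" column
# exactly, since a volleyball match cannot end in a draw.
import numpy as np

winner_vec = np.where(
    matches["n_set_team1"] > matches["n_set_team2"],
    matches["first_team_name"],
    matches["second_team_name"],
)
print("Matches the apply-based column:", (winner_vec == matches["winner"]).all())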
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683212.ipynb
world-volleyball-data-players-teams-and-more
arianghasemi
[{"Id": 129683212, "ScriptId": 38564117, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6636345, "CreationDate": "05/15/2023 18:03:52", "VersionNumber": 2.0, "Title": "Matches analysis", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 71.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186004804, "KernelVersionId": 129683212, "SourceDatasetVersionId": 5692585}]
[{"Id": 5692585, "DatasetId": 3258792, "DatasourceVersionId": 5768199, "CreatorUserId": 6636345, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "05/15/2023 17:26:50", "VersionNumber": 4.0, "Title": "World Men Volleyball Data: Players,Teams, 13 more", "Slug": "world-volleyball-data-players-teams-and-more", "Subtitle": "Over 47,000 volleyball players (~all players) data associated with other factors", "Description": "Dive into the heart of the global volleyball universe with this comprehensive dataset! Spanning players, teams, matches, transfers, awards, and more, this dataset offers an unparalleled look into professional volleyball.\n\nFrom the strength behind the scenes to the stars of the show, explore the profiles of bodybuilders, physiotherapists, statisticians, and players who make each game possible. Follow the trajectories of players as they move between teams and achieve recognition for their skills. Discover the leaders who guide each team to success, including coaches, presidents, team managers, and sports directors.\n\nGo beyond the court to explore the broader context of the volleyball world. Learn about the countries participating in this global sport and the stadiums hosting electrifying matches. Study the intricate details of each game, down to the number of sets won by each team.\n\nWhether you're a volleyball enthusiast, a data scientist looking for a unique dataset to analyze, or simply curious about the inner workings of a global sport, this dataset provides a wealth of information to explore and investigate. Welcome to the world of volleyball!", "VersionNotes": "Data Update 2023-05-15", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3258792, "CreatorUserId": 6636345, "OwnerUserId": 6636345.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5692585.0, "CurrentDatasourceVersionId": 5768199.0, "ForumId": 3324321, "Type": 2, "CreationDate": "05/12/2023 09:57:58", "LastActivityDate": "05/12/2023", "TotalViews": 331, "TotalDownloads": 16, "TotalVotes": 0, "TotalKernels": 3}]
[{"Id": 6636345, "UserName": "arianghasemi", "DisplayName": "Arian Ghasemi", "RegisterDate": "02/01/2021", "PerformanceTier": 2}]
false
2
1,434
0
1,738
1,434
129683435
# # Hi friend 🙃 # **We are implementing a Conditional Variational Autoencoder step by step so that you can see that the world of generative models is very interesting and not quite complicated.** # # Libraries import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.datasets import mnist from tensorflow import keras import keras.backend as K from tensorflow.keras.layers import Input, Dense, Lambda from tensorflow.keras.layers import Flatten, Reshape, concatenate from tensorflow.keras.layers import BatchNormalization, Dropout hidden_dim = 2 batch_size = 100 num_classes = 10 # # Prepare data (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train / 255 x_test = x_test / 255 x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) y_train_cat = keras.utils.to_categorical(y_train, num_classes) y_test_cat = keras.utils.to_categorical(y_test, num_classes) # # Our model | VAE # * encoder # * latent space # * decoder # **auxiliary functions** def dropout_and_batchnorm(x): return Dropout(0.3)(BatchNormalization()(x)) def noiser(args): global mean, log_var mean, log_var = args N = K.random_normal(shape=(batch_size, hidden_dim), mean=0.0, stddev=1.0) return K.exp(log_var / 2) * N + mean # # Encoder input_image = Input(batch_shape=(batch_size, 28, 28, 1)) fl = Flatten()(input_image) lb = Input(shape=(num_classes,)) x = concatenate([fl, lb]) x = Dense(256, activation="relu")(x) x = dropout_and_batchnorm(x) x = Dense(128, activation="relu")(x) x = dropout_and_batchnorm(x) # # Latent space mean = Dense(hidden_dim)(x) log_var = Dense(hidden_dim)(x) h = Lambda(noiser, output_shape=(hidden_dim,), name="latent_space")([mean, log_var]) # # Decoder input_dec = Input(shape=(hidden_dim,)) lb_dec = Input(shape=(num_classes,)) d = concatenate([input_dec, lb_dec]) d = Dense(128, activation="elu")(d) d = dropout_and_batchnorm(d) d = Dense(256, activation="elu")(d) d = dropout_and_batchnorm(d) d = Dense(28 * 28, activation="sigmoid")(d) decoded = Reshape((28, 28, 1))(d) # # Loss function def vae_loss(x, y): x = K.reshape(x, shape=(batch_size, 28 * 28)) y = K.reshape(y, shape=(batch_size, 28 * 28)) loss = K.sum(K.square(x - y), axis=-1) kl_loss = -0.5 * K.sum(1 + log_var - K.square(mean) - K.exp(log_var), axis=-1) return loss + kl_loss # # VAE encoder = keras.Model([input_image, lb], h, name="encoder") decoder = keras.Model([input_dec, lb_dec], decoded, name="decoder") cvae = keras.Model( inputs=[input_image, lb, lb_dec], outputs=decoder([encoder([input_image, lb]), lb_dec]), name="cvae", ) cvae.compile(optimizer="adam", loss=vae_loss) cvae.summary() # # Training epochs = 15 cvae.fit( [x_train, y_train_cat, y_train_cat], x_train, epochs=epochs, batch_size=batch_size, shuffle=True, ) # ## Our latent space lb = lb_dec = y_test_cat h = encoder.predict([x_test, lb], batch_size=batch_size) plt.scatter(h[:, 0], h[:, 1]) # # And now let's make our first prompt # **Yes, you heard right. 
This is exactly what we did training with class labels for.**

import re


class PromptProcess:
    def __init__(self, num_classes: int = 10):
        self.num_classes = num_classes

    def __show(self, prompt: int, n: int = 4, num: int = 1) -> None:
        total = 2 * n + 1
        input_lbl = np.zeros((1, self.num_classes))
        input_lbl[0, prompt] = 1
        h = np.zeros((1, hidden_dim))
        plt.figure(figsize=(total, total))
        for i in range(-n, n + 1):
            for j in range(-n, n + 1):
                ax = plt.subplot(total, total, num)
                num += 1
                h[0, :] = [1 * i / n, 1 * j / n]
                img = decoder.predict([h, input_lbl])
                plt.imshow(img.squeeze(), cmap="gray")
                ax.get_xaxis().set_visible(False)
                ax.get_yaxis().set_visible(False)
        plt.show()
        print(self.origin_prompt)

    @staticmethod
    def get_prompt(prompt: str) -> int:
        prompt = int(re.findall(r"\d+", prompt)[0])
        assert 0 <= prompt <= 9  # valid MNIST digits are 0-9
        return prompt

    def __call__(self, prompt: str) -> None:
        self.origin_prompt = prompt
        prompt = self.get_prompt(prompt)
        self.__show(prompt)


process = PromptProcess()
process("Please draw 6")
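# ### Conditioning the decoder directly
# A minimal generation sketch, assuming the trained `decoder`, `hidden_dim`
# and `num_classes` defined above (`generate_digit` itself is a hypothetical
# helper, not part of the original notebook): a one-hot class label plus a
# latent point is all the decoder needs to draw a digit.
import numpy as np
import matplotlib.pyplot as plt


def generate_digit(digit: int, z: np.ndarray = None) -> None:
    label = np.zeros((1, num_classes), dtype="float32")
    label[0, digit] = 1.0
    if z is None:
        # the latent origin tends to give an "average-looking" sample
        z = np.zeros((1, hidden_dim), dtype="float32")
    img = decoder.predict([z, label], verbose=0)
    plt.imshow(img.squeeze(), cmap="gray")
    plt.axis("off")
    plt.show()


generate_digit(3)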
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683435.ipynb
null
null
[{"Id": 129683435, "ScriptId": 38369919, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13000811, "CreationDate": "05/15/2023 18:05:49", "VersionNumber": 2.0, "Title": "Conditional Variational Autoencoder | Keras \ud83e\udd40", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 160.0, "LinesInsertedFromPrevious": 36.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 124.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
false
0
1,505
3
1,505
1,505
129683383
# ## Imports import os import gc import glob import json import multiprocessing as mp import warnings import albumentations as A import matplotlib.pyplot as plt import matplotlib.patches as patches import PIL.Image as Image import cv2 import numpy as np import pandas as pd import random import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as thd import segmentation_models_pytorch as smp from torchvision import transforms from collections import defaultdict from types import SimpleNamespace from typing import Dict, List, Optional, Tuple from pathlib import Path from sklearn.metrics import fbeta_score from sklearn.exceptions import UndefinedMetricWarning from albumentations.pytorch import ToTensorV2 from segmentation_models_pytorch.encoders import get_preprocessing_fn from tqdm.auto import tqdm warnings.simplefilter("ignore") # ## Config class CFG: # ============== comp exp name ============= comp_name = "vesuvius" comp_dir_path = "/kaggle/input" comp_folder_name = "vesuvius-challenge-ink-detection" comp_dataset_path = os.path.join(comp_dir_path, comp_folder_name) exp_name = "vesuvius_2d_slide_unet_exp001" # ============== pred target ============= target_size = 1 # ============== model cfg ============= model_name = "Unet" backbone = "efficientnet-b0" # backbone = 'se_resnext50_32x4d' in_chans = 6 # 65 # ============== data preprocessing ============= preprocess_input = get_preprocessing_fn(backbone, pretrained="imagenet") # ============== training cfg ============= size = 224 tile_size = 224 stride = tile_size // 2 train_batch_size = 32 # 32 valid_batch_size = train_batch_size use_amp = True scheduler = "GradualWarmupSchedulerV2" # scheduler = 'CosineAnnealingLR' epochs = 15 # 30 # adamW warmupあり warmup_factor = 10 # lr = 1e-3 / warmup_factor lr = 1e-3 # ============== fold ============= valid_id = 1 # objective_cv = 'binary' # 'binary', 'multiclass', 'regression' metric_direction = "maximize" # maximize, 'minimize' # metrics = 'dice_coef' # ============== fixed ============= pretrained = True inf_weight = "best" # 'best' min_lr = 1e-6 weight_decay = 1e-6 max_grad_norm = 1000 print_freq = 50 num_workers = 4 seed = 42 # ============== set dataset path ============= outputs_path = f"/kaggle/working/outputs/{comp_name}/{exp_name}/" submission_dir = outputs_path + "submissions/" submission_path = submission_dir + f"submission_{exp_name}.csv" model_dir = outputs_path + f"{comp_name}-models/" figures_dir = outputs_path + "figures/" log_dir = outputs_path + "logs/" log_path = log_dir + f"{exp_name}.txt" # ============== augmentation ============= train_aug_list = [ # A.RandomResizedCrop( # size, size, scale=(0.85, 1.0)), A.Resize(size, size), # A.HorizontalFlip(p=0.5), # A.VerticalFlip(p=0.5), A.RandomBrightnessContrast(p=0.75), A.ShiftScaleRotate(p=0.75), A.OneOf( [ A.GaussNoise(var_limit=[10, 50]), A.GaussianBlur(), A.MotionBlur(), ], p=0.4, ), A.GridDistortion(num_steps=5, distort_limit=0.3, p=0.5), A.CoarseDropout( max_holes=1, max_width=int(size * 0.3), max_height=int(size * 0.3), mask_fill_value=0, p=0.5, ), # A.Cutout(max_h_size=int(size * 0.6), # max_w_size=int(size * 0.6), num_holes=1, p=1.0), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] valid_aug_list = [ A.Resize(size, size), A.Normalize(mean=[0] * in_chans, std=[1] * in_chans), ToTensorV2(transpose_mask=True), ] # [ # A.RandomResizedCrop(height=224, width=224, scale=(0.08, 1.0)), # A.HorizontalFlip(p=0.5), # A.OneOf([ # 
A.RandomBrightnessContrast(brightness_limit=0.4, contrast_limit=0.4), # # A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10), # ], p=0.75), # A.GaussNoise(var_limit=(10.0, 50.0)), # A.CoarseDropout(max_holes=8, max_height=32, max_width=32, p=0.5), # A.Normalize( # mean= [0] * in_chans, # std= [1] * in_chans # ), # # A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),s # ToTensorV2() # ] # ## Set up data class SubvolumeDataset(thd.Dataset): def __init__(self, fragments: List[Path], transform=None, train=True): # self.fragments = sorted(map(lambda path: path.resolve(), fragments)) self.fragments_ids = [i + 1 for i in range(len(fragments))] self.transform = transform self.train = train images, masks, valid_images, valid_masks, _ = self.slice_fragment_to_subvolumes( self.fragments_ids ) self.masks = masks self.images = images def slice_fragment_to_subvolumes(self, fragment_ids): train_images = [] train_masks = [] valid_images = [] valid_masks = [] valid_xyxys = [] for fragment_id in range(1, 4): image, mask = self.read_image_mask(fragment_id) x1_list = list(range(0, image.shape[1] - CFG.tile_size + 1, CFG.stride)) y1_list = list(range(0, image.shape[0] - CFG.tile_size + 1, CFG.stride)) for y1 in y1_list: for x1 in x1_list: y2 = y1 + CFG.tile_size x2 = x1 + CFG.tile_size # xyxys.append((x1, y1, x2, y2)) if fragment_id == CFG.valid_id: valid_images.append(image[y1:y2, x1:x2]) valid_masks.append(mask[y1:y2, x1:x2, None]) valid_xyxys.append([x1, y1, x2, y2]) else: train_images.append(image[y1:y2, x1:x2]) train_masks.append(mask[y1:y2, x1:x2, None]) return train_images, train_masks, valid_images, valid_masks, valid_xyxys def read_image_mask(self, fragment_id): z_dim = CFG.in_chans z_mid = 65 // 2 # len(surface_volume_paths) // 2 z_start, z_end = z_mid - z_dim // 2, z_mid + z_dim // 2 indx = range(z_start, z_end) images = [] for i in tqdm(indx): # image = np.array(Image.open(CFG.comp_dataset_path + f"/train/{fragment_id}/surface_volume/{i:02}.tif"), dtype='float32') image = cv2.imread( CFG.comp_dataset_path + f"/train/{fragment_id}/surface_volume/{i:02}.tif", 0, ) pad0 = CFG.tile_size - image.shape[0] % CFG.tile_size pad1 = CFG.tile_size - image.shape[1] % CFG.tile_size image = np.pad(image, [(0, pad0), (0, pad1)], constant_values=0) images.append(image) images = np.stack(images, axis=2) mask = cv2.imread( CFG.comp_dataset_path + f"/train/{fragment_id}/inklabels.png", 0 ) # mask = np.array(Image.open(CFG.comp_dataset_path + f"/train/{fragment_id}/inklabels.png")) # mask = Image.open(CFG.comp_dataset_path + f"/train/{fragment_id}/inklabels.png") mask = np.pad(mask, [(0, pad0), (0, pad1)], constant_values=0) mask = mask.astype("float32") mask /= 255.0 print(images.shape) return images, mask def __len__(self): return len(self.images) def __getitem__(self, index): images = self.images[index] mask = self.masks[index] if self.transform: data = self.transform(image=images, mask=mask) images = data["image"] mask = data["mask"] return images, mask base_path = Path("/kaggle/input/vesuvius-challenge-ink-detection") train_path = base_path / "train" all_fragments = sorted([f.name for f in train_path.iterdir()]) print("All fragments:", all_fragments) train_fragments = [train_path / fragment_name for fragment_name in all_fragments[:1]] train_fragments train_trasnforms = A.Compose(CFG.train_aug_list) train_dset = SubvolumeDataset(fragments=train_fragments, transform=train_trasnforms) print("Num items (pixels)", len(train_dset)) # #### Sanity check index = 500 
print(f"Sub Volume image shape = {train_dset[index][0].shape}") print(f"Sub Volume mask shape = {train_dset[index][1].shape}") print(f"dataset len = {len(train_dset)}") plot_dataset = SubvolumeDataset(fragments=train_fragments) transform = CFG.train_aug_list transform = A.Compose( [t for t in transform if not isinstance(t, (A.Normalize, ToTensorV2))] ) plot_count = 0 for i in range(1000): image, mask = plot_dataset[i] data = transform(image=image, mask=mask) aug_image = data["image"] aug_mask = data["mask"] if mask.sum() == 0: continue fig, axes = plt.subplots(1, 4, figsize=(15, 8)) axes[0].imshow(image[..., 0], cmap="gray") axes[1].imshow(mask, cmap="gray") axes[2].imshow(aug_image[..., 0], cmap="gray") axes[3].imshow(aug_mask, cmap="gray") plot_count += 1 if plot_count == 3: break del plot_dataset gc.collect() train_loader = thd.DataLoader(train_dset, batch_size=CFG.train_batch_size, shuffle=True) print("Num batches:", len(train_loader)) # ### Set up model DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") class InkDetector(torch.nn.Module): def __init__(self, cfg, weight=None): super().__init__() self.cfg = cfg self.model = smp.Unet( encoder_name=cfg.backbone, encoder_weights=weight, in_channels=cfg.in_chans, classes=cfg.target_size, activation=None, ) def forward(self, image): output = self.model(image) return output model = InkDetector(CFG, "imagenet").to(DEVICE) # ### Train TRAINING_STEPS = 10 LEARNING_RATE = CFG.lr TRAIN_RUN = True # To avoid re-running when saving the notebook if TRAIN_RUN: criterion = nn.BCEWithLogitsLoss() optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE) scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, max_lr=LEARNING_RATE, total_steps=TRAINING_STEPS ) model.train() running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 pbar = tqdm(enumerate(train_loader), total=TRAINING_STEPS) for i, (subvolumes, inklabels) in pbar: if i >= TRAINING_STEPS: break optimizer.zero_grad() outputs = model(subvolumes.to(DEVICE)) loss = criterion(outputs, inklabels.to(DEVICE)) loss.backward() optimizer.step() scheduler.step() pred_ink = outputs.detach().sigmoid().gt(0.4).cpu().int() accuracy = (pred_ink == inklabels).sum().float().div(inklabels.size(0)) running_fbeta += fbeta_score( inklabels.view(-1).numpy(), pred_ink.view(-1).numpy(), beta=0.5 ) running_accuracy += accuracy.item() running_loss += loss.item() denom += 1 pbar.set_postfix( { "Loss": running_loss / denom, "Accuracy": running_accuracy / denom, "[email protected]": running_fbeta / denom, } ) if (i + 1) % 500 == 0: running_loss = 0.0 running_accuracy = 0.0 running_fbeta = 0.0 denom = 0 torch.save(model.state_dict(), "/kaggle/working/model.pt") else: model_weights = torch.load("/kaggle/working/model.pt") model.load_state_dict(model_weights) # ### Evaluate # Clear memory before loading test fragments train_dset.labels = None train_dset.image_stacks = [] del train_loader, train_dset gc.collect() test_path = base_path / "test" test_fragments = [test_path / fragment_name for fragment_name in test_path.iterdir()] print("All fragments:", test_fragments) pred_images = [] model.eval() for test_fragment in test_fragments: outputs = [] eval_dset = SubvolumeDataset(fragments=[test_fragment], train=False) eval_loader = thd.DataLoader(eval_dset, batch_size=BATCH_SIZE, shuffle=False) with torch.no_grad(): for i, (subvolumes, _) in enumerate(tqdm(eval_loader)): output = model(subvolumes.to(DEVICE)).view(-1).sigmoid().cpu().numpy() outputs.append(output) # we only load 1 fragment at a 
time image_shape = eval_dset.image_stacks[0].shape[1:] eval_dset.labels = None eval_dset.image_stacks = None del eval_loader gc.collect() pred_image = np.zeros(image_shape, dtype=np.uint8) outputs = np.concatenate(outputs) for (y, x, _), prob in zip(eval_dset.pixels[: outputs.shape[0]], outputs): pred_image[y, x] = prob > 0.4 pred_images.append(pred_image) eval_dset.pixels = None del eval_dset gc.collect() print("Finished", test_fragment) plt.imshow(pred_images[1], cmap="gray") # ### Submission def rle(output): flat_img = np.where(output > 0.4, 1, 0).astype(np.uint8) starts = np.array((flat_img[:-1] == 0) & (flat_img[1:] == 1)) ends = np.array((flat_img[:-1] == 1) & (flat_img[1:] == 0)) starts_ix = np.where(starts)[0] + 2 ends_ix = np.where(ends)[0] + 2 lengths = ends_ix - starts_ix return " ".join(map(str, sum(zip(starts_ix, lengths), ()))) submission = defaultdict(list) for fragment_id, fragment_name in enumerate(test_fragments): submission["Id"].append(fragment_name.name) submission["Predicted"].append(rle(pred_images[fragment_id])) pd.DataFrame.from_dict(submission).to_csv("/kaggle/working/submission.csv", index=False) pd.DataFrame.from_dict(submission)
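# ### Sanity-checking the RLE encoder
# A small round-trip sketch for the `rle` helper above; `rle_decode` is a
# hypothetical inverse written only for this check. Note that `rle` as
# written assumes the mask neither starts nor ends with a positive pixel,
# since it only detects 0->1 and 1->0 transitions between neighbours.
import numpy as np


def rle_decode(rle_str: str, size: int) -> np.ndarray:
    mask = np.zeros(size, dtype=np.uint8)
    tokens = list(map(int, rle_str.split()))
    for start, length in zip(tokens[0::2], tokens[1::2]):
        mask[start - 1 : start - 1 + length] = 1  # starts are 1-indexed
    return mask


toy = np.array([0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)
encoded = rle(toy)
assert (rle_decode(encoded, toy.size) == toy).all()
print("RLE round-trip OK:", encoded)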
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683383.ipynb
null
null
[{"Id": 129683383, "ScriptId": 38514101, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11219911, "CreationDate": "05/15/2023 18:05:19", "VersionNumber": 3.0, "Title": "UNet Segmentataion [training]", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 464.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 390.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,365
0
4,365
4,365
129683517
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # # Data Cleaning train_data = pd.read_csv("/kaggle/input/titanic/train.csv") test_data = pd.read_csv("/kaggle/input/titanic/test.csv") train_data.head() train_data.info() test_data.info() # We can see test data does not have **Survived** column # Remove duplicates and find null values print("Total duplicates for train data:", train_data.duplicated().sum()) print("Total duplicates for test data:", test_data.duplicated().sum()) print("Total null data for train data:", train_data.isnull().sum(), "\n") print("Total null data for test data:", test_data.isnull().sum()) # I am dropping the Cabin data from both training and test data because most of it is missing and also I am dropping one row where *Fare* is null in test data test_data[test_data["Fare"].isnull()] # Median Fare of Pclass 3 where Embarkment in S test_data[(test_data["Pclass"] == 3) & (test_data["Embarked"] == "S")].Fare.hist( bins=50 ) test_data[(test_data["Pclass"] == 3) & (test_data["Embarked"] == "S")].Fare.median() # so I will fill the missing Fare with median 8 euros test_data["Fare"].fillna(8, inplace=True) train_data.drop(columns="Cabin", axis=1, inplace=True) test_data.drop(columns="Cabin", axis=1, inplace=True) # Embarked has two null data for training set # Select the "Embarked" column embarked_column = train_data["Embarked"] # Convert the "Embarked" column to numeric values numeric_embarked = pd.Categorical(embarked_column).codes # Convert the numeric embarked back to a DataFrame numeric_embarked_df = pd.DataFrame(numeric_embarked, columns=["Embarked"]) # Concatenate the numeric embarked DataFrame with the original DataFrame data_with_numeric_embarked = pd.concat([train_data, numeric_embarked_df], axis=1) # Calculate the correlation between the embarked and other columns correlation = data_with_numeric_embarked.corr()["Embarked"] # Print the correlation with other columns print(correlation) plt.title("Embarked and Fare relationship for training data") sns.barplot(train_data, x="Embarked", y="Fare") # We can see from this, that "Fare" is highly correlated with "embarked". 
This relationship is used for filling the missing embarked values train_data[train_data["Embarked"].isnull()] # Since these two passengers have Fare 80 they possibly embarked in C # Fill the missing value for "Embarked" train_data["Embarked"].fillna("C", inplace=True) # There are 177 missing values in *Age* train_data plt.title("KDE plot of age before filling missing value with median") sns.kdeplot(train_data["Age"]) # Fill missing values with median of Age of each passenger class in *train_data* and *test_data* Median_Age_Class = train_data.groupby(["Pclass"])[ "Age" ].median() # Median of each passenger class # this loop fills the median age value of each Pclass to the empty age values of that Pclass for idx in Median_Age_Class.index: train_data.loc[train_data["Pclass"] == idx, "Age"] = train_data[ train_data["Pclass"] == idx ]["Age"].fillna(Median_Age_Class.loc[idx]) Median_Age_Class = test_data.groupby(["Pclass"])[ "Age" ].median() # Median of each passenger class # this loop fills the median age value of each Pclass to the empty age values of that Pclass for idx in Median_Age_Class.index: test_data.loc[test_data["Pclass"] == idx, "Age"] = test_data[ test_data["Pclass"] == idx ]["Age"].fillna(Median_Age_Class.loc[idx]) # # EDA # **Sex v/s Survival count** sns.countplot(data=train_data, x="Sex", hue="Survived") train_data.groupby(["Sex"])["Survived"].value_counts( normalize=True ).unstack().plot.bar() # **Age and Survival** print("Maximum Age:", train_data["Age"].max()) print("Minimum Age:", train_data["Age"].min()) bins = [0, 12, 18, 25, 60, 80] # use pd.cut() to group age into different categories train_data["Age_group"] = pd.cut( train_data["Age"], bins=bins, labels=["Kid", "Teenager", "YoungAdult", "Adult", "SeniorCitizen"], ) sns.countplot(data=train_data, x="Age_group", hue="Survived") train_data["Age_group"].value_counts() # **Passenger class and Survival status** sns.barplot( data=train_data, x="Sex", y="Survived", hue="Pclass", order=["male", "female"], palette="Dark2", ) # **Fare and Survival status** sns.histplot(data=train_data, x="Fare", hue="Survived", bins=10, multiple="dodge") # **Relatives and survival status** sns.countplot(train_data, x="SibSp", hue="Survived") sns.countplot(train_data, x="Parch", hue="Survived", palette="Set1") train_data["relatives"] = train_data["Parch"] + train_data["SibSp"] test_data["relatives"] = test_data["Parch"] + test_data["SibSp"] sns.countplot(train_data, x="relatives", hue="Survived") # Having 1,2 or 3 relatives increase the possibility of survival # **Embarked and survival status** sns.countplot(train_data, x="Embarked", hue="Survived", palette="Set1") # Create a correlation matrix corr = train_data.corr() # Create a heatmap using seaborn sns.heatmap(corr, cmap="coolwarm", annot=True) # # Create training set # Here I am creating the training data by removing columns such as *PassengerId, Name, Ticket, Age_group* and also do hot encoding for non numeric columns train_data.info() test_data.info() drop_cols = [ "PassengerId", "Name", "Ticket", "Age_group", "relatives", ] # columns to be removed from features to train non_numeric_cols = ["Sex", "Embarked"] # non-numeric cols for one-hot encoding predict_cols = ["Survived"] # column to be predicted X = train_data.drop(columns=drop_cols + predict_cols) X = pd.get_dummies( X, columns=non_numeric_cols ) # one hot-encoding for non-numeric columns y = train_data[predict_cols].values.ravel() # ## Model Evaluation from sklearn.model_selection import cross_val_score from sklearn.svm import SVC from 
sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

# Create an instance of the StandardScaler
scaler = StandardScaler()

# Scale the features
X_scaled = scaler.fit_transform(X)

# List of models
model_list = [
    LogisticRegression(),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    SVC(),
    LinearDiscriminantAnalysis(),
]

# Apply cross-validation
for model in model_list:
    scores = cross_val_score(model, X_scaled, y, cv=10)
    # Print the cross-validation scores
    print("Model :", model)
    print("Cross-validation scores:", scores)
    print("Mean accuracy:", scores.mean(), "\n")

# Apply our model to test dataset
drop_cols = [
    "PassengerId",
    "Name",
    "Ticket",
    "relatives",  # drop the engineered column here too so train/test features match
]  # columns to be removed from features to train
non_numeric_cols = ["Sex", "Embarked"]  # non-numeric cols for one-hot encoding
X_test = test_data.drop(columns=drop_cols)
X_test = pd.get_dummies(
    X_test, columns=non_numeric_cols
)  # one hot-encoding for non-numeric columns
# Reuse the statistics learned on the training data; do not refit on the test set
Xtest_scaled = scaler.transform(X_test)
svc = SVC()
svc.fit(X_scaled, y)
predictions = svc.predict(Xtest_scaled)
df_prediction = pd.DataFrame(
    {"PassengerId": test_data["PassengerId"].values, "Survived": predictions}
)
df_prediction.to_csv("submission.csv", index=False)
# give weight to each feature of data and add these variables to make a new one
# eg. 0.95*sex+0.70*Age+0.9*PClass
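# ### Keeping the scaler inside each CV fold
# In the loop above the scaler is fit on the full training set before
# cross_val_score, so every fold's validation rows leak into the scaling
# statistics. A sketch using sklearn's Pipeline (same X and y as above)
# refits the scaler inside each fold instead:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

pipe = make_pipeline(StandardScaler(), SVC())
fold_scores = cross_val_score(pipe, X, y, cv=10)
print("Pipeline mean accuracy:", fold_scores.mean())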
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/683/129683517.ipynb
null
null
[{"Id": 129683517, "ScriptId": 36476539, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12608353, "CreationDate": "05/15/2023 18:06:38", "VersionNumber": 5.0, "Title": "Titanic_project", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 225.0, "LinesInsertedFromPrevious": 180.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 45.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,384
0
2,384
2,384
129717054
import pandas as pd import numpy as np import datetime from datetime import datetime, date, timedelta import matplotlib import matplotlib.pyplot as plt import geopandas from shapely.geometry import Point, Polygon df2 = pd.read_csv( "/kaggle/input/chicago-crime-2022/Crimes_-_2022.csv", encoding="iso-8859-1" ) df2.info() import geopandas as gpd neighborhood = gpd.read_file( "/kaggle/input/neighborhoods-chicago/Boundaries - Neighborhoods (1).geojson" ) print(neighborhood.head()) neighborhood.info() neighborhood.plot() from shapely.geometry import Point, Polygon # Drop rows without coordinates; the crime data stores them in Latitude/Longitude (the taxi centroid columns named here originally do not exist in this CSV) df2.dropna(subset=["Latitude", "Longitude"], inplace=True) def locat(row): point = Point(row["Longitude"], row["Latitude"]) for i in range(neighborhood["geometry"].count()): if point.within(neighborhood["geometry"][i]): return neighborhood["pri_neigh"][i] return "not_found" df2["Neighborhood"] = df2.apply(locat, axis=1) df2 df2.to_csv("2022 Crime Neighborhood Matched.csv")
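The locat() function above tests every point against every polygon in a per-row Python loop. A common faster alternative, sketched here under the assumption that both layers share a coordinate reference system and that the columns match the script above, is a single vectorized spatial join:

import geopandas as gpd

# Build a point layer from the same Latitude/Longitude columns used by locat().
points = gpd.GeoDataFrame(
    df2,
    geometry=gpd.points_from_xy(df2["Longitude"], df2["Latitude"]),
    crs=neighborhood.crs,  # assumes the crime coordinates are in the same CRS
)
# One point-in-polygon join for the whole frame (use op="within" on geopandas < 0.10).
joined = gpd.sjoin(
    points, neighborhood[["pri_neigh", "geometry"]], how="left", predicate="within"
)
joined["Neighborhood"] = joined["pri_neigh"].fillna("not_found")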
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/717/129717054.ipynb
null
null
[{"Id": 129717054, "ScriptId": 38534806, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13405093, "CreationDate": "05/16/2023 01:57:19", "VersionNumber": 1.0, "Title": "Crime- Neighborhood Matching - Gig Work", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 36.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 30.0, "LinesInsertedFromFork": 6.0, "LinesDeletedFromFork": 15.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 30.0, "TotalVotes": 0}]
null
null
null
null
import pandas as pd import numpy as np import datetime from datetime import datetime, date, timedelta import matplotlib import matplotlib.pyplot as plt import geopandas from shapely.geometry import Point, Polygon df2 = pd.read_csv( "/kaggle/input/chicago-crime-2022/Crimes_-_2022.csv", encoding="iso-8859-1" ) df2.info() import geopandas as gpd neighborhood = gpd.read_file( "/kaggle/input/neighborhoods-chicago/Boundaries - Neighborhoods (1).geojson" ) print(neighborhood.head()) neighborhood.info() neighborhood.plot() from shapely.geometry import Point, Polygon # Drop rows without coordinates; the crime data stores them in Latitude/Longitude (the taxi centroid columns named here originally do not exist in this CSV) df2.dropna(subset=["Latitude", "Longitude"], inplace=True) def locat(row): point = Point(row["Longitude"], row["Latitude"]) for i in range(neighborhood["geometry"].count()): if point.within(neighborhood["geometry"][i]): return neighborhood["pri_neigh"][i] return "not_found" df2["Neighborhood"] = df2.apply(locat, axis=1) df2 df2.to_csv("2022 Crime Neighborhood Matched.csv")
false
0
325
0
325
325
129717621
<jupyter_start><jupyter_text>Data science DAY1 Titanic Kaggle dataset identifier: data-science-day1-titanic <jupyter_code>import pandas as pd df = pd.read_csv('data-science-day1-titanic/DSB_Day1_Titanic_train.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 PassengerId 891 non-null int64 1 Survived 891 non-null int64 2 Pclass 891 non-null int64 3 Name 891 non-null object 4 Sex 891 non-null object 5 Age 714 non-null float64 6 SibSp 891 non-null int64 7 Parch 891 non-null int64 8 Ticket 891 non-null object 9 Fare 891 non-null float64 10 Cabin 204 non-null object 11 Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.7+ KB <jupyter_text>Examples: { "PassengerId": 1, "Survived": 0, "Pclass": 3, "Name": "Braund, Mr. Owen Harris", "Sex": "male", "Age": 22, "SibSp": 1, "Parch": 0, "Ticket": "A/5 21171", "Fare": 7.25, "Cabin": null, "Embarked": "S" } { "PassengerId": 2, "Survived": 1, "Pclass": 1, "Name": "Cumings, Mrs. John Bradley (Florence Briggs Thayer)", "Sex": "female", "Age": 38, "SibSp": 1, "Parch": 0, "Ticket": "PC 17599", "Fare": 71.2833, "Cabin": "C85", "Embarked": "C" } { "PassengerId": 3, "Survived": 1, "Pclass": 3, "Name": "Heikkinen, Miss. Laina", "Sex": "female", "Age": 26, "SibSp": 0, "Parch": 0, "Ticket": "STON/O2. 3101282", "Fare": 7.925, "Cabin": null, "Embarked": "S" } { "PassengerId": 4, "Survived": 1, "Pclass": 1, "Name": "Futrelle, Mrs. Jacques Heath (Lily May Peel)", "Sex": "female", "Age": 35, "SibSp": 1, "Parch": 0, "Ticket": "113803", "Fare": 53.1, "Cabin": "C123", "Embarked": "S" } <jupyter_script>import pandas as pd import numpy as np train = pd.read_csv( "/kaggle/input/data-science-day1-titanic/DSB_Day1_Titanic_train.csv" ) train.head() print("train data shape: ", train.shape) print("----------[train information]----------") print(train.info()) train.isna() train.fillna(0) import matplotlib.pyplot as plt import seaborn as sns sns.set() def pie_chart(feature): feature_ratio = train[feature].value_counts(sort=False) feature_size = feature_ratio.size feature_index = feature_ratio.index survived = train[train["Survived"] == 1][feature].value_counts() dead = train[train["Survived"] == 0][feature].value_counts() # plt.plot() has no aspect keyword; open equal-aspect axes so the pie is drawn as a circle plt.axes(aspect="equal") plt.pie(feature_ratio, labels=feature_index, autopct="%1.1f%%") plt.title(feature + "'s ratio in total") plt.show() for i, index in enumerate(feature_index): plt.subplot(1, feature_size + 1, i + 1, aspect="equal") plt.pie( [survived[index], dead[index]], labels=["Survived", "Dead"], autopct="%1.1f%%", ) plt.title(str(index) + "'s ratio") plt.show() pie_chart("Sex") pie_chart("Pclass") pie_chart("Embarked") def B(feature): survived = train[train["Survived"] == 1][feature].value_counts() dead = train[train["Survived"] == 0][feature].value_counts() df = pd.DataFrame([survived, dead]) df.index = ["Survived", "Dead"] df.plot(kind="bar", stacked=True, figsize=(5, 5)) B("SibSp") B("Parch")
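As a compact cross-check on the pie charts above, the same survival splits can be read from a row-normalized crosstab; a minimal sketch using the `train` dataframe from this notebook:

import pandas as pd

# Row-normalized crosstab: each row shows the death/survival split for one category.
for feature in ["Sex", "Pclass", "Embarked"]:
    rates = pd.crosstab(train[feature], train["Survived"], normalize="index")
    print(f"--- {feature} ---")
    print(rates.round(3))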
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/717/129717621.ipynb
data-science-day1-titanic
soutarokirihara
[{"Id": 129717621, "ScriptId": 38575939, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11758727, "CreationDate": "05/16/2023 02:05:34", "VersionNumber": 1.0, "Title": "notebookea0282a779", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 55.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186055663, "KernelVersionId": 129717621, "SourceDatasetVersionId": 2080558}]
[{"Id": 2080558, "DatasetId": 1247358, "DatasourceVersionId": 2120923, "CreatorUserId": 7088777, "LicenseName": "Unknown", "CreationDate": "04/02/2021 13:27:16", "VersionNumber": 1.0, "Title": "Data science DAY1 Titanic", "Slug": "data-science-day1-titanic", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1247358, "CreatorUserId": 7088777, "OwnerUserId": 7088777.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2080558.0, "CurrentDatasourceVersionId": 2120923.0, "ForumId": 1265674, "Type": 2, "CreationDate": "04/02/2021 13:27:16", "LastActivityDate": "04/02/2021", "TotalViews": 6175, "TotalDownloads": 385, "TotalVotes": 16, "TotalKernels": 88}]
[{"Id": 7088777, "UserName": "soutarokirihara", "DisplayName": "Soutaro Kirihara", "RegisterDate": "04/02/2021", "PerformanceTier": 0}]
import pandas as pd import numpy as np train = pd.read_csv( "/kaggle/input/data-science-day1-titanic/DSB_Day1_Titanic_train.csv" ) train.head() print("train data shape: ", train.shape) print("----------[train information]----------") print(train.info()) train.isna() train.fillna(0) import matplotlib.pyplot as plt import seaborn as sns sns.set() def pie_chart(feature): feature_ratio = train[feature].value_counts(sort=False) feature_size = feature_ratio.size feature_index = feature_ratio.index survived = train[train["Survived"] == 1][feature].value_counts() dead = train[train["Survived"] == 0][feature].value_counts() # plt.plot() has no aspect keyword; open equal-aspect axes so the pie is drawn as a circle plt.axes(aspect="equal") plt.pie(feature_ratio, labels=feature_index, autopct="%1.1f%%") plt.title(feature + "'s ratio in total") plt.show() for i, index in enumerate(feature_index): plt.subplot(1, feature_size + 1, i + 1, aspect="equal") plt.pie( [survived[index], dead[index]], labels=["Survived", "Dead"], autopct="%1.1f%%", ) plt.title(str(index) + "'s ratio") plt.show() pie_chart("Sex") pie_chart("Pclass") pie_chart("Embarked") def B(feature): survived = train[train["Survived"] == 1][feature].value_counts() dead = train[train["Survived"] == 0][feature].value_counts() df = pd.DataFrame([survived, dead]) df.index = ["Survived", "Dead"] df.plot(kind="bar", stacked=True, figsize=(5, 5)) B("SibSp") B("Parch")
[{"data-science-day1-titanic/DSB_Day1_Titanic_train.csv": {"column_names": "[\"PassengerId\", \"Survived\", \"Pclass\", \"Name\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Ticket\", \"Fare\", \"Cabin\", \"Embarked\"]", "column_data_types": "{\"PassengerId\": \"int64\", \"Survived\": \"int64\", \"Pclass\": \"int64\", \"Name\": \"object\", \"Sex\": \"object\", \"Age\": \"float64\", \"SibSp\": \"int64\", \"Parch\": \"int64\", \"Ticket\": \"object\", \"Fare\": \"float64\", \"Cabin\": \"object\", \"Embarked\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n", "summary": "{\"PassengerId\": {\"count\": 891.0, \"mean\": 446.0, \"std\": 257.3538420152301, \"min\": 1.0, \"25%\": 223.5, \"50%\": 446.0, \"75%\": 668.5, \"max\": 891.0}, \"Survived\": {\"count\": 891.0, \"mean\": 0.3838383838383838, \"std\": 0.4865924542648575, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Pclass\": {\"count\": 891.0, \"mean\": 2.308641975308642, \"std\": 0.836071240977049, \"min\": 1.0, \"25%\": 2.0, \"50%\": 3.0, \"75%\": 3.0, \"max\": 3.0}, \"Age\": {\"count\": 714.0, \"mean\": 29.69911764705882, \"std\": 14.526497332334042, \"min\": 0.42, \"25%\": 20.125, \"50%\": 28.0, \"75%\": 38.0, \"max\": 80.0}, \"SibSp\": {\"count\": 891.0, \"mean\": 0.5230078563411896, \"std\": 1.1027434322934317, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 8.0}, \"Parch\": {\"count\": 891.0, \"mean\": 0.38159371492704824, \"std\": 0.8060572211299483, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 6.0}, \"Fare\": {\"count\": 891.0, \"mean\": 32.204207968574636, \"std\": 49.6934285971809, \"min\": 0.0, \"25%\": 7.9104, \"50%\": 14.4542, \"75%\": 31.0, \"max\": 512.3292}}", "examples": "{\"PassengerId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Survived\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"Pclass\":{\"0\":3,\"1\":1,\"2\":3,\"3\":1},\"Name\":{\"0\":\"Braund, Mr. Owen Harris\",\"1\":\"Cumings, Mrs. John Bradley (Florence Briggs Thayer)\",\"2\":\"Heikkinen, Miss. Laina\",\"3\":\"Futrelle, Mrs. Jacques Heath (Lily May Peel)\"},\"Sex\":{\"0\":\"male\",\"1\":\"female\",\"2\":\"female\",\"3\":\"female\"},\"Age\":{\"0\":22.0,\"1\":38.0,\"2\":26.0,\"3\":35.0},\"SibSp\":{\"0\":1,\"1\":1,\"2\":0,\"3\":1},\"Parch\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"Ticket\":{\"0\":\"A\\/5 21171\",\"1\":\"PC 17599\",\"2\":\"STON\\/O2. 3101282\",\"3\":\"113803\"},\"Fare\":{\"0\":7.25,\"1\":71.2833,\"2\":7.925,\"3\":53.1},\"Cabin\":{\"0\":null,\"1\":\"C85\",\"2\":null,\"3\":\"C123\"},\"Embarked\":{\"0\":\"S\",\"1\":\"C\",\"2\":\"S\",\"3\":\"S\"}}"}}]
true
1
<start_data_description><data_path>data-science-day1-titanic/DSB_Day1_Titanic_train.csv: <column_names> ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'] <column_types> {'PassengerId': 'int64', 'Survived': 'int64', 'Pclass': 'int64', 'Name': 'object', 'Sex': 'object', 'Age': 'float64', 'SibSp': 'int64', 'Parch': 'int64', 'Ticket': 'object', 'Fare': 'float64', 'Cabin': 'object', 'Embarked': 'object'} <dataframe_Summary> {'PassengerId': {'count': 891.0, 'mean': 446.0, 'std': 257.3538420152301, 'min': 1.0, '25%': 223.5, '50%': 446.0, '75%': 668.5, 'max': 891.0}, 'Survived': {'count': 891.0, 'mean': 0.3838383838383838, 'std': 0.4865924542648575, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Pclass': {'count': 891.0, 'mean': 2.308641975308642, 'std': 0.836071240977049, 'min': 1.0, '25%': 2.0, '50%': 3.0, '75%': 3.0, 'max': 3.0}, 'Age': {'count': 714.0, 'mean': 29.69911764705882, 'std': 14.526497332334042, 'min': 0.42, '25%': 20.125, '50%': 28.0, '75%': 38.0, 'max': 80.0}, 'SibSp': {'count': 891.0, 'mean': 0.5230078563411896, 'std': 1.1027434322934317, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 8.0}, 'Parch': {'count': 891.0, 'mean': 0.38159371492704824, 'std': 0.8060572211299483, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 6.0}, 'Fare': {'count': 891.0, 'mean': 32.204207968574636, 'std': 49.6934285971809, 'min': 0.0, '25%': 7.9104, '50%': 14.4542, '75%': 31.0, 'max': 512.3292}} <dataframe_info> RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 PassengerId 891 non-null int64 1 Survived 891 non-null int64 2 Pclass 891 non-null int64 3 Name 891 non-null object 4 Sex 891 non-null object 5 Age 714 non-null float64 6 SibSp 891 non-null int64 7 Parch 891 non-null int64 8 Ticket 891 non-null object 9 Fare 891 non-null float64 10 Cabin 204 non-null object 11 Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.7+ KB <some_examples> {'PassengerId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Survived': {'0': 0, '1': 1, '2': 1, '3': 1}, 'Pclass': {'0': 3, '1': 1, '2': 3, '3': 1}, 'Name': {'0': 'Braund, Mr. Owen Harris', '1': 'Cumings, Mrs. John Bradley (Florence Briggs Thayer)', '2': 'Heikkinen, Miss. Laina', '3': 'Futrelle, Mrs. Jacques Heath (Lily May Peel)'}, 'Sex': {'0': 'male', '1': 'female', '2': 'female', '3': 'female'}, 'Age': {'0': 22.0, '1': 38.0, '2': 26.0, '3': 35.0}, 'SibSp': {'0': 1, '1': 1, '2': 0, '3': 1}, 'Parch': {'0': 0, '1': 0, '2': 0, '3': 0}, 'Ticket': {'0': 'A/5 21171', '1': 'PC 17599', '2': 'STON/O2. 3101282', '3': '113803'}, 'Fare': {'0': 7.25, '1': 71.2833, '2': 7.925, '3': 53.1}, 'Cabin': {'0': None, '1': 'C85', '2': None, '3': 'C123'}, 'Embarked': {'0': 'S', '1': 'C', '2': 'S', '3': 'S'}} <end_description>
491
0
1,362
491
129760162
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt def plot_some_lines(): plt.figure() x1 = [10, 20, 5, 40, 8] x2 = [30, 43, 9, 7, 20] plt.plot(x1, label="Group A") plt.plot(x2, label="Group B") plt.legend() plt.show() plot_some_lines() import seaborn as sns sns.set() plot_some_lines() sns.get_dataset_names(), len(sns.get_dataset_names()) mpg_df = sns.load_dataset("mpg") mpg_df.head() countplot = sns.countplot(data=mpg_df, x="cylinders") four_cy = mpg_df[mpg_df.cylinders == 4] by_origin = four_cy.groupby("origin", as_index=False) mpg_by_origin = by_origin.mpg.mean() barplot = sns.barplot(x="origin", y="mpg", data=mpg_by_origin) avg_mpg = mpg_df.groupby("model_year", as_index=False).mpg.mean() relplot = sns.relplot(x="model_year", y="mpg", data=avg_mpg) # ## Flights dataset # flights = sns.load_dataset("flights") flights.tail() flights_plot = sns.relplot(x="year", y="passengers", data=flights) flights_plot = sns.relplot(x="year", y="passengers", data=flights, hue="month") year_sums = flights.groupby("year", as_index=False).sum() sums_plot = sns.relplot(x="year", y="passengers", data=year_sums) sums_lmplot = sns.lmplot(x="year", y="passengers", data=year_sums) barplot = sns.barplot(x="year", y="passengers", data=flights) barplot = sns.barplot(x="year", y="passengers", data=flights, ci=None) barplot = sns.barplot(x="month", y="passengers", data=flights) anscombe = sns.load_dataset("anscombe") anscombe from sklearn.linear_model import LinearRegression datasets = ["I", "II", "III", "IV"] for dataset in datasets: print("Dataset", dataset) data = anscombe.query(f"dataset == '{dataset}'") am = LinearRegression().fit(data.x.values.reshape(-1, 1), data.y.values) print(am.coef_) print(am.intercept_) # ## Dataset 1 sns.regplot(x="x", y="y", data=anscombe.query("dataset == 'I'")) # ## Dataset 2 sns.regplot(x="x", y="y", data=anscombe.query("dataset == 'II'"), order=2) # the order=2 parameter specifies the degree of the polynomial regression line to be fitted to the data. In this case, order=2 indicates a second-degree polynomial regression line, also known as a quadratic regression line. # ## Dataset 3 sns.lmplot( x="x", y="y", data=anscombe.query("dataset == 'III'"), robust=True, ci=None, scatter_kws={"s": 80}, ) # robust=True: Enables robust regression, which reduces the influence of outliers on the regression line estimation. # ci=None: Disables the display of confidence intervals around the regression line. # scatter_kws={"s": 80}: Specifies additional keyword arguments to be passed to the underlying scatter plot function. In this case, it sets the size of the scatter plot markers to 80. # ## Dataset 4 sns.lmplot( x="x", y="y", data=anscombe.query("dataset == 'IV'"), ci=None, scatter_kws={"s": 80} ) # ci=None: Disables the display of confidence intervals around the regression line. 
# scatter_kws={"s": 80}: Specifies additional keyword arguments to be passed to the underlying scatter plot function. In this case, it sets the size of the scatter plot markers to 80.
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/760/129760162.ipynb
null
null
[{"Id": 129760162, "ScriptId": 38586775, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10769997, "CreationDate": "05/16/2023 09:24:02", "VersionNumber": 1.0, "Title": "class-14", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 111.0, "LinesInsertedFromPrevious": 111.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt def plot_some_lines(): plt.figure() x1 = [10, 20, 5, 40, 8] x2 = [30, 43, 9, 7, 20] plt.plot(x1, label="Group A") plt.plot(x2, label="Group B") plt.legend() plt.show() plot_some_lines() import seaborn as sns sns.set() plot_some_lines() sns.get_dataset_names(), len(sns.get_dataset_names()) mpg_df = sns.load_dataset("mpg") mpg_df.head() countplot = sns.countplot(data=mpg_df, x="cylinders") four_cy = mpg_df[mpg_df.cylinders == 4] by_origin = four_cy.groupby("origin", as_index=False) mpg_by_origin = by_origin.mpg.mean() barplot = sns.barplot(x="origin", y="mpg", data=mpg_by_origin) avg_mpg = mpg_df.groupby("model_year", as_index=False).mpg.mean() relplot = sns.relplot(x="model_year", y="mpg", data=avg_mpg) # ## Flights dataset # flights = sns.load_dataset("flights") flights.tail() flights_plot = sns.relplot(x="year", y="passengers", data=flights) flights_plot = sns.relplot(x="year", y="passengers", data=flights, hue="month") year_sums = flights.groupby("year", as_index=False).sum() sums_plot = sns.relplot(x="year", y="passengers", data=year_sums) sums_lmplot = sns.lmplot(x="year", y="passengers", data=year_sums) barplot = sns.barplot(x="year", y="passengers", data=flights) barplot = sns.barplot(x="year", y="passengers", data=flights, ci=None) barplot = sns.barplot(x="month", y="passengers", data=flights) anscombe = sns.load_dataset("anscombe") anscombe from sklearn.linear_model import LinearRegression datasets = ["I", "II", "III", "IV"] for dataset in datasets: print("Dataset", dataset) data = anscombe.query(f"dataset == '{dataset}'") am = LinearRegression().fit(data.x.values.reshape(-1, 1), data.y.values) print(am.coef_) print(am.intercept_) # ## Dataset 1 sns.regplot(x="x", y="y", data=anscombe.query("dataset == 'I'")) # ## Dataset 2 sns.regplot(x="x", y="y", data=anscombe.query("dataset == 'II'"), order=2) # the order=2 parameter specifies the degree of the polynomial regression line to be fitted to the data. In this case, order=2 indicates a second-degree polynomial regression line, also known as a quadratic regression line. # ## Dataset 3 sns.lmplot( x="x", y="y", data=anscombe.query("dataset == 'III'"), robust=True, ci=None, scatter_kws={"s": 80}, ) # robust=True: Enables robust regression, which reduces the influence of outliers on the regression line estimation. # ci=None: Disables the display of confidence intervals around the regression line. # scatter_kws={"s": 80}: Specifies additional keyword arguments to be passed to the underlying scatter plot function. In this case, it sets the size of the scatter plot markers to 80. # ## Dataset 4 sns.lmplot( x="x", y="y", data=anscombe.query("dataset == 'IV'"), ci=None, scatter_kws={"s": 80} ) # ci=None: Disables the display of confidence intervals around the regression line. 
# scatter_kws={"s": 80}: Specifies additional keyword arguments to be passed to the underlying scatter plot function. In this case, it sets the size of the scatter plot markers to 80.
false
0
1,247
0
1,247
1,247
129760174
# ### Prepare from datasets import load_from_disk, Dataset, concatenate_datasets from sklearn.feature_extraction.text import CountVectorizer from transformers import DataCollatorWithPadding import sys import os from matplotlib import pyplot as plt import pandas as pd import numpy as np import torch.utils.data as data_utils import torch from tqdm import tqdm from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from torch.utils.data import TensorDataset, random_split from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import ( get_linear_schedule_with_warmup, T5ForConditionalGeneration, T5Tokenizer, ) from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer from transformers import EarlyStoppingCallback import random import warnings import gc import json import evaluate from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer, DataCollatorForSeq2Seq, ) from transformers import LongformerTokenizer, EncoderDecoderModel from transformers import AutoTokenizer, AutoModel import torch from tqdm import tqdm from torch import nn warnings.filterwarnings("ignore") import pickle from collections import defaultdict import datetime from transformers import GenerationConfig from datasets import disable_caching disable_caching() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"Using device: {device}") #!cp -r /kaggle/input/short-pool short_pool def seed_everything(seed_val=0): random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) rouge = evaluate.load("rouge") bleu = evaluate.load("bleu") exact_match = evaluate.load("exact_match") accuracy = evaluate.load("accuracy") tokenizer = T5Tokenizer.from_pretrained("t5-small", padding=True) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): logits = logits[0] return logits.argmax(dim=-1) def compute_metrics(eval_pred, multilabel=False, calc_all=False): predictions, labels = eval_pred decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) if not multilabel: decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) else: decoded_labels = [ tokenizer.batch_decode(l, skip_special_tokens=True) for l in labels ] result = dict() rouge_result = rouge.compute(predictions=decoded_preds, references=decoded_labels) result["rouge1"] = rouge_result["rouge1"] result["rouge2"] = rouge_result["rouge2"] if calc_all: bleu_result = bleu.compute(predictions=decoded_preds, references=decoded_labels) result["Bleu"] = bleu_result["bleu"] if not multilabel: em_result = exact_match.compute( predictions=decoded_preds, references=decoded_labels, regexes_to_ignore=["the "], ignore_case=True, ignore_punctuation=True, ) result["EM"] = em_result["exact_match"] else: em_results = [] for pred, doc_labels in zip(decoded_preds, decoded_labels): max_em_result = 0 for label in doc_labels: em_result = exact_match.compute( predictions=[pred], references=[label], ignore_case=True, ignore_punctuation=True, ) max_em_result = max(max_em_result, em_result["exact_match"]) em_results.append(max_em_result) result["EM"] = np.mean(em_results) prediction_lens = [ np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions ] result["gen_len"] = np.mean(prediction_lens) return {k: round(v, 4) for k, v in result.items()} def 
remove_logits(logits, labels): return torch.tensor([]), labels class ActiveQA: def __init__(self, config): self.config = config self._reset_models() self.model_is_trained = False self.training_args = Seq2SeqTrainingArguments( output_dir=self.config["model_output_dir"], evaluation_strategy="epoch", logging_strategy="epoch", save_strategy="epoch", learning_rate=self.config["learning_rate"], per_device_train_batch_size=self.config["per_device_train_batch_size"], per_device_eval_batch_size=self.config["per_device_eval_batch_size"], weight_decay=self.config["weight_decay"], num_train_epochs=self.config["num_train_epochs"], predict_with_generate=True, generation_max_length=self.config["max_length_answer"], report_to="none", push_to_hub=False, logging_dir="logs", metric_for_best_model="rouge1", load_best_model_at_end=True, save_total_limit=3, ) self.generation_config = GenerationConfig.from_pretrained("t5-small") self.generation_config.max_length = self.config["max_length_answer"] def _reset_models(self): self.model = T5ForConditionalGeneration.from_pretrained( self.config["checkpoint_answer"] ).to(self.config["device"]) self.tokenizer = T5Tokenizer.from_pretrained( self.config["checkpoint_answer"], padding=True ) self.data_collator = DataCollatorForSeq2Seq( tokenizer=self.tokenizer, model=self.model, max_length=self.config["max_length"], ) def load_from_disk(self, path): with open(path, "rb") as f: self.__dict__ = pickle.load(f) def save_to_disk(self, path): with open(path, "wb") as f: pickle.dump(self.__dict__, f) def train(self, train_dataset=None, test_dataset=None): self.trainer = Seq2SeqTrainer( model=self.model, args=self.training_args, train_dataset=train_dataset, eval_dataset=test_dataset, tokenizer=self.tokenizer, data_collator=self.data_collator, compute_metrics=compute_metrics, callbacks=[EarlyStoppingCallback(early_stopping_patience=2)], ) self.trainer.train() self.models_is_trained = True return self.trainer.state.log_history def _get_probs_from_logits(self, logits, labels, normalized=True): answer = [] probs = torch.softmax(logits, -1) for sample_probs, sample_labels in zip(probs, labels): p = 1.0 for idx, token in enumerate(sample_labels): p *= sample_probs[idx][token].item() if normalized: p = p ** (1 / len(sample_labels)) answer.append(p) return answer def _predict_probs(self, dataset, normalized=True): predictions = self.trainer.predict(dataset).predictions dataloader = data_utils.DataLoader( predictions, batch_size=self.config["per_device_eval_batch_size"], shuffle=False, ) probs_list = [] labels_list = [] with torch.no_grad(): for inputs in dataloader: inputs = inputs.to(self.config["device"]) labels = self.model.generate( inputs, generation_config=self.generation_config ) logits = self.model(input_ids=inputs, labels=labels).logits probs_list += self._get_probs_from_logits(logits, labels, normalized) labels_list += labels labels = [l.cpu().numpy() for l in labels_list] return {"prob": probs_list, "labels": labels} def evaluate(self, test_dataset, test_text): res_dict = self._predict_probs(test_dataset) res_dict["labels"] = res_dict["labels"] res_dict["document_id"] = test_dataset["document_id"] df = pd.DataFrame(res_dict) df = ( df.sort_values("prob", ascending=False) .groupby("document_id", as_index=False) .first() ) df = df.sort_values("document_id") metrics = self.trainer.compute_metrics( (df["labels"], test_text["answers"]), multilabel=True, calc_all=True ) return metrics def predict(self, input_text): input_ids = self.tokenizer(input_text, 
return_tensors="pt").input_ids.to( self.config["device"] ) with torch.no_grad(): labels = self.model.generate(input_ids) return self.tokenizer.decode(labels, skip_special_tokens=True)[0] def _best_ids_from_probs(self, doc_ids, probs, best_ids_cnt): if self.config["unsertainty_strategy"] == "min_normalized_prob": df = pd.DataFrame({"doc_id": doc_ids, "prob": probs}) df = df.sort_values("prob", ascending=True) df = ( df.groupby("doc_id", as_index=False) .first() .sort_values("prob", ascending=True) ) return df["doc_id"].values.tolist()[:best_ids_cnt] else: raise ValueError( f"Unsupported unsertainty strategy {self.config['unsertainty_strategy']}" ) def _choose_ids(self, dataset): document_ids = list(set(dataset["document_id"])) random_ids_cnt = int( self.config["step_document_cnt"] * self.config["random_sample_fraction"] ) best_ids_cnt = self.config["step_document_cnt"] - random_ids_cnt random_ids = set( random.sample(document_ids, min(len(document_ids), random_ids_cnt)) ) if best_ids_cnt == 0: return random_ids document_ids = list(set(document_ids) - random_ids) pool_ids = set( random.sample( document_ids, min(len(document_ids), self.config["pool_document_cnt"]) ) ) filtered_dataset = dataset.filter( lambda x: x["document_id"] in pool_ids ).remove_columns("labels") probs = self._predict_probs(filtered_dataset)["prob"] best_ids = self._best_ids_from_probs( filtered_dataset["document_id"], probs, best_ids_cnt ) return random_ids.union(set(best_ids)) def emulate_active_learning( self, pool, test_dataset, val_dataset, val_answers, save_path=None ): document_ids = list(set(pool["document_id"])) ids_in_train = set( random.sample( document_ids, min(len(document_ids), self.config["start_document_cnt"]) ) ) print(f"Step 0: {len(ids_in_train)} / {len(document_ids)} indexes are in train") train_step = pool.filter( lambda x: x["document_id"] in ids_in_train and x["labels"] is not None ) train_metrics = self.train(train_step, test_dataset) eval_metrics = self.evaluate(val_dataset, val_answers) metrics = {"train": [train_metrics], "val": [eval_metrics]} del train_step gc.collect() for step in range(self.config["active_learning_steps_cnt"]): self._reset_models() print(f"Step {step + 1}: choosing ids for train") pool_to_choose = pool.filter(lambda x: x["document_id"] not in ids_in_train) ids_to_add = self._choose_ids(pool_to_choose) ids_in_train = ids_in_train.union(ids_to_add) print( f"Step {step + 1}: {len(ids_in_train)} / {len(document_ids)} indexes are in train" ) train_step = pool.filter( lambda x: x["document_id"] in ids_in_train and x["labels"] is not None ) train_metrics = self.train(train_step, test_dataset) eval_metrics = self.evaluate(val_dataset, val_answers) metrics["train"].append(train_metrics) metrics["val"].append(eval_metrics) del train_step del pool_to_choose gc.collect() if save_path is not None: with open(save_path, "wb") as f: pickle.dump({f"step {step + 1} metrics": metrics}, f) return metrics def create_config(model_dir=None): if model_dir is None: model_dir = f"qa_{int(datetime.datetime.now().timestamp())}" os.makedirs(model_dir) print(f"model_dir {model_dir} created") config = { "checkpoint_answer": "t5-small", "max_length": 512, "max_length_answer": 32, "learning_rate": 1e-5, "weight_decay": 1e-2, "per_device_train_batch_size": 4, "per_device_eval_batch_size": 8, "num_train_epochs": 10, "device": device, "unsertainty_strategy": "min_normalized_prob", "start_document_cnt": 1000, "active_learning_steps_cnt": 10, "pool_document_cnt": 2500, "step_document_cnt": 500, 
"random_sample_fraction": 1.0, "model_output_dir": os.path.join(model_dir, "model"), "log_path": os.path.join(model_dir, "logs.pkl"), } with open(os.path.join(model_dir, "config.yaml"), "wb") as f: pickle.dump(config, f) return config torch.cuda.empty_cache() import gc gc.collect() def get_train_test(dataset_path, max_doc_id=None, test_size=0.05, as_pool=True): dataset = load_from_disk(dataset_path, keep_in_memory=True) if max_doc_id is not None: dataset = dataset.filter(lambda x: x["document_id"] < max_doc_id) if not as_pool: dataset = dataset.filter(lambda x: x["labels"] is not None) train_ids, test_ids = train_test_split( list(set(dataset["document_id"])), test_size=test_size ) train_dataset = dataset.filter(lambda x: x["document_id"] in train_ids) test_dataset = dataset.filter( lambda x: x["labels"] is not None and x["document_id"] in test_ids ) return train_dataset, test_dataset def get_val(val_path, val_answers_path, max_ids=3000): dataset = load_from_disk(val_path, keep_in_memory=True) val_dataset = dataset.filter(lambda x: x["document_id"] < max_ids) answers_dataset = load_from_disk(val_answers_path, keep_in_memory=True) val_answer_dataset = answers_dataset.filter(lambda x: x["document_id"] < max_ids) return val_dataset, val_answer_dataset dataset_path = "/kaggle/working/short_train" train_dataset, test_dataset = get_train_test(dataset_path, test_size=50) train_dataset val_path = "/kaggle/working/short-val" val_answers_path = "/kaggle/working/short-val-answers" val_dataset, val_answer_dataset = get_val(val_path, val_answers_path, max_ids=60) val_dataset seed_everything(0) config = create_config("random-short") documents = list(set(train_dataset["document_id"])) ids_in_train = [] metrics = dict() for n in [500]: ids_to_add = random.sample(documents, n - len(ids_in_train)) ids_in_train += ids_to_add documents = [x for x in documents if x not in ids_in_train] print(len(ids_in_train), len(documents)) new_train = train_dataset.filter(lambda x: x["document_id"] in ids_in_train) qa = ActiveQA(config) train_metrics = qa.train(new_train, test_dataset) eval_metrics = qa.evaluate(val_dataset.remove_columns("labels"), val_answer_dataset) print(eval_metrics) metrics[n] = (train_metrics, eval_metrics) with open("random_short.pkl", "wb") as f: pickle.dump(metrics, f) with open("ids.pkl", "wb") as f: pickle.dump({"ids": ids_in_train}, f) dataset_path = "/kaggle/working/short_train" train_dataset, test_dataset = get_train_test(dataset_path, test_size=200) train_dataset val_path = "/kaggle/working/short-val" val_answers_path = "/kaggle/working/short-val-answers" val_dataset, val_answer_dataset = get_val(val_path, val_answers_path, max_ids=1000) val_dataset config = create_config("short-all") qa = ActiveQA(config) train_metrics = qa.train(train_dataset, test_dataset) eval_metrics = qa.evaluate(val_dataset.remove_columns("labels"), val_answer_dataset)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/760/129760174.ipynb
null
null
[{"Id": 129760174, "ScriptId": 37978823, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6054920, "CreationDate": "05/16/2023 09:24:07", "VersionNumber": 24.0, "Title": "active learning v1", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 403.0, "LinesInsertedFromPrevious": 32.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 371.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ### Prepare from datasets import load_from_disk, Dataset, concatenate_datasets from sklearn.feature_extraction.text import CountVectorizer from transformers import DataCollatorWithPadding import sys import os from matplotlib import pyplot as plt import pandas as pd import numpy as np import torch.utils.data as data_utils import torch from tqdm import tqdm from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from torch.utils.data import TensorDataset, random_split from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import ( get_linear_schedule_with_warmup, T5ForConditionalGeneration, T5Tokenizer, ) from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer from transformers import EarlyStoppingCallback import random import warnings import gc import json import evaluate from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer, DataCollatorForSeq2Seq, ) from transformers import LongformerTokenizer, EncoderDecoderModel from transformers import AutoTokenizer, AutoModel import torch from tqdm import tqdm from torch import nn warnings.filterwarnings("ignore") import pickle from collections import defaultdict import datetime from transformers import GenerationConfig from datasets import disable_caching disable_caching() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"Using device: {device}") #!cp -r /kaggle/input/short-pool short_pool def seed_everything(seed_val=0): random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) rouge = evaluate.load("rouge") bleu = evaluate.load("bleu") exact_match = evaluate.load("exact_match") accuracy = evaluate.load("accuracy") tokenizer = T5Tokenizer.from_pretrained("t5-small", padding=True) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): logits = logits[0] return logits.argmax(dim=-1) def compute_metrics(eval_pred, multilabel=False, calc_all=False): predictions, labels = eval_pred decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) if not multilabel: decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) else: decoded_labels = [ tokenizer.batch_decode(l, skip_special_tokens=True) for l in labels ] result = dict() rouge_result = rouge.compute(predictions=decoded_preds, references=decoded_labels) result["rouge1"] = rouge_result["rouge1"] result["rouge2"] = rouge_result["rouge2"] if calc_all: bleu_result = bleu.compute(predictions=decoded_preds, references=decoded_labels) result["Bleu"] = bleu_result["bleu"] if not multilabel: em_result = exact_match.compute( predictions=decoded_preds, references=decoded_labels, regexes_to_ignore=["the "], ignore_case=True, ignore_punctuation=True, ) result["EM"] = em_result["exact_match"] else: em_results = [] for pred, doc_labels in zip(decoded_preds, decoded_labels): max_em_result = 0 for label in doc_labels: em_result = exact_match.compute( predictions=[pred], references=[label], ignore_case=True, ignore_punctuation=True, ) max_em_result = max(max_em_result, em_result["exact_match"]) em_results.append(max_em_result) result["EM"] = np.mean(em_results) prediction_lens = [ np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions ] result["gen_len"] = np.mean(prediction_lens) return {k: round(v, 4) for k, v in result.items()} def 
remove_logits(logits, labels): return torch.tensor([]), labels class ActiveQA: def __init__(self, config): self.config = config self._reset_models() self.model_is_trained = False self.training_args = Seq2SeqTrainingArguments( output_dir=self.config["model_output_dir"], evaluation_strategy="epoch", logging_strategy="epoch", save_strategy="epoch", learning_rate=self.config["learning_rate"], per_device_train_batch_size=self.config["per_device_train_batch_size"], per_device_eval_batch_size=self.config["per_device_eval_batch_size"], weight_decay=self.config["weight_decay"], num_train_epochs=self.config["num_train_epochs"], predict_with_generate=True, generation_max_length=self.config["max_length_answer"], report_to="none", push_to_hub=False, logging_dir="logs", metric_for_best_model="rouge1", load_best_model_at_end=True, save_total_limit=3, ) self.generation_config = GenerationConfig.from_pretrained("t5-small") self.generation_config.max_length = self.config["max_length_answer"] def _reset_models(self): self.model = T5ForConditionalGeneration.from_pretrained( self.config["checkpoint_answer"] ).to(self.config["device"]) self.tokenizer = T5Tokenizer.from_pretrained( self.config["checkpoint_answer"], padding=True ) self.data_collator = DataCollatorForSeq2Seq( tokenizer=self.tokenizer, model=self.model, max_length=self.config["max_length"], ) def load_from_disk(self, path): with open(path, "rb") as f: self.__dict__ = pickle.load(f) def save_to_disk(self, path): with open(path, "wb") as f: pickle.dump(self.__dict__, f) def train(self, train_dataset=None, test_dataset=None): self.trainer = Seq2SeqTrainer( model=self.model, args=self.training_args, train_dataset=train_dataset, eval_dataset=test_dataset, tokenizer=self.tokenizer, data_collator=self.data_collator, compute_metrics=compute_metrics, callbacks=[EarlyStoppingCallback(early_stopping_patience=2)], ) self.trainer.train() self.models_is_trained = True return self.trainer.state.log_history def _get_probs_from_logits(self, logits, labels, normalized=True): answer = [] probs = torch.softmax(logits, -1) for sample_probs, sample_labels in zip(probs, labels): p = 1.0 for idx, token in enumerate(sample_labels): p *= sample_probs[idx][token].item() if normalized: p = p ** (1 / len(sample_labels)) answer.append(p) return answer def _predict_probs(self, dataset, normalized=True): predictions = self.trainer.predict(dataset).predictions dataloader = data_utils.DataLoader( predictions, batch_size=self.config["per_device_eval_batch_size"], shuffle=False, ) probs_list = [] labels_list = [] with torch.no_grad(): for inputs in dataloader: inputs = inputs.to(self.config["device"]) labels = self.model.generate( inputs, generation_config=self.generation_config ) logits = self.model(input_ids=inputs, labels=labels).logits probs_list += self._get_probs_from_logits(logits, labels, normalized) labels_list += labels labels = [l.cpu().numpy() for l in labels_list] return {"prob": probs_list, "labels": labels} def evaluate(self, test_dataset, test_text): res_dict = self._predict_probs(test_dataset) res_dict["labels"] = res_dict["labels"] res_dict["document_id"] = test_dataset["document_id"] df = pd.DataFrame(res_dict) df = ( df.sort_values("prob", ascending=False) .groupby("document_id", as_index=False) .first() ) df = df.sort_values("document_id") metrics = self.trainer.compute_metrics( (df["labels"], test_text["answers"]), multilabel=True, calc_all=True ) return metrics def predict(self, input_text): input_ids = self.tokenizer(input_text, 
return_tensors="pt").input_ids.to( self.config["device"] ) with torch.no_grad(): labels = self.model.generate(input_ids) return self.tokenizer.decode(labels, skip_special_tokens=True)[0] def _best_ids_from_probs(self, doc_ids, probs, best_ids_cnt): if self.config["unsertainty_strategy"] == "min_normalized_prob": df = pd.DataFrame({"doc_id": doc_ids, "prob": probs}) df = df.sort_values("prob", ascending=True) df = ( df.groupby("doc_id", as_index=False) .first() .sort_values("prob", ascending=True) ) return df["doc_id"].values.tolist()[:best_ids_cnt] else: raise ValueError( f"Unsupported unsertainty strategy {self.config['unsertainty_strategy']}" ) def _choose_ids(self, dataset): document_ids = list(set(dataset["document_id"])) random_ids_cnt = int( self.config["step_document_cnt"] * self.config["random_sample_fraction"] ) best_ids_cnt = self.config["step_document_cnt"] - random_ids_cnt random_ids = set( random.sample(document_ids, min(len(document_ids), random_ids_cnt)) ) if best_ids_cnt == 0: return random_ids document_ids = list(set(document_ids) - random_ids) pool_ids = set( random.sample( document_ids, min(len(document_ids), self.config["pool_document_cnt"]) ) ) filtered_dataset = dataset.filter( lambda x: x["document_id"] in pool_ids ).remove_columns("labels") probs = self._predict_probs(filtered_dataset)["prob"] best_ids = self._best_ids_from_probs( filtered_dataset["document_id"], probs, best_ids_cnt ) return random_ids.union(set(best_ids)) def emulate_active_learning( self, pool, test_dataset, val_dataset, val_answers, save_path=None ): document_ids = list(set(pool["document_id"])) ids_in_train = set( random.sample( document_ids, min(len(document_ids), self.config["start_document_cnt"]) ) ) print(f"Step 0: {len(ids_in_train)} / {len(document_ids)} indexes are in train") train_step = pool.filter( lambda x: x["document_id"] in ids_in_train and x["labels"] is not None ) train_metrics = self.train(train_step, test_dataset) eval_metrics = self.evaluate(val_dataset, val_answers) metrics = {"train": [train_metrics], "val": [eval_metrics]} del train_step gc.collect() for step in range(self.config["active_learning_steps_cnt"]): self._reset_models() print(f"Step {step + 1}: choosing ids for train") pool_to_choose = pool.filter(lambda x: x["document_id"] not in ids_in_train) ids_to_add = self._choose_ids(pool_to_choose) ids_in_train = ids_in_train.union(ids_to_add) print( f"Step {step + 1}: {len(ids_in_train)} / {len(document_ids)} indexes are in train" ) train_step = pool.filter( lambda x: x["document_id"] in ids_in_train and x["labels"] is not None ) train_metrics = self.train(train_step, test_dataset) eval_metrics = self.evaluate(val_dataset, val_answers) metrics["train"].append(train_metrics) metrics["val"].append(eval_metrics) del train_step del pool_to_choose gc.collect() if save_path is not None: with open(save_path, "wb") as f: pickle.dump({f"step {step + 1} metrics": metrics}, f) return metrics def create_config(model_dir=None): if model_dir is None: model_dir = f"qa_{int(datetime.datetime.now().timestamp())}" os.makedirs(model_dir) print(f"model_dir {model_dir} created") config = { "checkpoint_answer": "t5-small", "max_length": 512, "max_length_answer": 32, "learning_rate": 1e-5, "weight_decay": 1e-2, "per_device_train_batch_size": 4, "per_device_eval_batch_size": 8, "num_train_epochs": 10, "device": device, "unsertainty_strategy": "min_normalized_prob", "start_document_cnt": 1000, "active_learning_steps_cnt": 10, "pool_document_cnt": 2500, "step_document_cnt": 500, 
"random_sample_fraction": 1.0, "model_output_dir": os.path.join(model_dir, "model"), "log_path": os.path.join(model_dir, "logs.pkl"), } with open(os.path.join(model_dir, "config.yaml"), "wb") as f: pickle.dump(config, f) return config torch.cuda.empty_cache() import gc gc.collect() def get_train_test(dataset_path, max_doc_id=None, test_size=0.05, as_pool=True): dataset = load_from_disk(dataset_path, keep_in_memory=True) if max_doc_id is not None: dataset = dataset.filter(lambda x: x["document_id"] < max_doc_id) if not as_pool: dataset = dataset.filter(lambda x: x["labels"] is not None) train_ids, test_ids = train_test_split( list(set(dataset["document_id"])), test_size=test_size ) train_dataset = dataset.filter(lambda x: x["document_id"] in train_ids) test_dataset = dataset.filter( lambda x: x["labels"] is not None and x["document_id"] in test_ids ) return train_dataset, test_dataset def get_val(val_path, val_answers_path, max_ids=3000): dataset = load_from_disk(val_path, keep_in_memory=True) val_dataset = dataset.filter(lambda x: x["document_id"] < max_ids) answers_dataset = load_from_disk(val_answers_path, keep_in_memory=True) val_answer_dataset = answers_dataset.filter(lambda x: x["document_id"] < max_ids) return val_dataset, val_answer_dataset dataset_path = "/kaggle/working/short_train" train_dataset, test_dataset = get_train_test(dataset_path, test_size=50) train_dataset val_path = "/kaggle/working/short-val" val_answers_path = "/kaggle/working/short-val-answers" val_dataset, val_answer_dataset = get_val(val_path, val_answers_path, max_ids=60) val_dataset seed_everything(0) config = create_config("random-short") documents = list(set(train_dataset["document_id"])) ids_in_train = [] metrics = dict() for n in [500]: ids_to_add = random.sample(documents, n - len(ids_in_train)) ids_in_train += ids_to_add documents = [x for x in documents if x not in ids_in_train] print(len(ids_in_train), len(documents)) new_train = train_dataset.filter(lambda x: x["document_id"] in ids_in_train) qa = ActiveQA(config) train_metrics = qa.train(new_train, test_dataset) eval_metrics = qa.evaluate(val_dataset.remove_columns("labels"), val_answer_dataset) print(eval_metrics) metrics[n] = (train_metrics, eval_metrics) with open("random_short.pkl", "wb") as f: pickle.dump(metrics, f) with open("ids.pkl", "wb") as f: pickle.dump({"ids": ids_in_train}, f) dataset_path = "/kaggle/working/short_train" train_dataset, test_dataset = get_train_test(dataset_path, test_size=200) train_dataset val_path = "/kaggle/working/short-val" val_answers_path = "/kaggle/working/short-val-answers" val_dataset, val_answer_dataset = get_val(val_path, val_answers_path, max_ids=1000) val_dataset config = create_config("short-all") qa = ActiveQA(config) train_metrics = qa.train(train_dataset, test_dataset) eval_metrics = qa.evaluate(val_dataset.remove_columns("labels"), val_answer_dataset)
false
0
4,522
0
4,522
4,522
129760477
<jupyter_start><jupyter_text>Marketing Linear Multiple Regression Kaggle dataset identifier: marketing-linear-multiple-regression <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error df = pd.read_csv( "/kaggle/input/marketing-linear-multiple-regression/Marketing_Data.csv" ) df.head() df.info() df.describe() sns.pairplot(df, x_vars=df.columns[:3], y_vars=df.columns[3], height=5) sns.heatmap(df.corr(), annot=True) plt.show() data = df[["youtube", "facebook", "newspaper", "sales"]] X = data[["youtube", "facebook", "newspaper"]] y = data["sales"] X.shape y.shape y = y.values.reshape(-1, 1) y.shape lr = LinearRegression() X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=70 ) print("X_train shape: ", X_train.shape) print("X_test shape: ", X_test.shape) print("y_train shape: ", y_train.shape) print("y_test shape: ", y_test.shape) lr.fit(X_train, y_train) print("intercept: ", lr.intercept_) print("coefficients (slope): ", lr.coef_) y_pred = lr.predict(X_test) y_pred[0:5] y_test[0:5] indexes = range(1, 53) # Ground Truth fig, ax = plt.subplots(figsize=(12, 8)) ax.plot( indexes, y_test, label="Ground Truth", color="blue", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) # Prediction ax.plot( indexes, y_pred, label="Prediction", color="green", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) plt.title("GROUND TRUTH - PREDICTION") plt.xlabel("Data Index") plt.ylabel("Sales") plt.legend(loc="upper left") plt.show() indexes = range(1, 53) # Residuals fig, ax = plt.subplots(figsize=(12, 8)) ax.plot( indexes, y_test - y_pred, label="Residuals", color="purple", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) ax.plot(indexes, np.zeros(52), color="black") plt.title("RESIDUALS") plt.xlabel("Data Index") plt.ylabel("Sales") plt.legend(loc="upper left") plt.show() import math R_2 = r2_score(y_test, y_pred) mse = mean_squared_error(y_test, y_pred) RMSE = math.sqrt(mse) print("R_2: ", R_2) print("RMSE: ", RMSE) X_train_ols = sm.add_constant(X_train) sm_model = sm.OLS(y_train, X_train_ols) result = sm_model.fit() print(result.summary()) X_train_new = X_train[["youtube", "facebook"]] X_test_new = X_test[["youtube", "facebook"]] X_train_new.shape X_test_new.shape lr.fit(X_train_new, y_train) y_pred_new = lr.predict(X_test_new) y_pred_new[0:5] y_test[0:5] X_train_new_ols = sm.add_constant(X_train_new) sm_model = sm.OLS(y_train, X_train_new_ols) result = sm_model.fit() print(result.summary()) plt.title("Youtube") sns.regplot(x=data.youtube, y=data.sales) plt.show() plt.title("Facebook") sns.regplot(x=data.facebook, y=data.sales) plt.show() 
plt.title("Newspaper") sns.regplot(x=data.newspaper, y=data.sales) plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/760/129760477.ipynb
marketing-linear-multiple-regression
fayejavad
[{"Id": 129760477, "ScriptId": 38590360, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14611578, "CreationDate": "05/16/2023 09:26:25", "VersionNumber": 1.0, "Title": "Marketing_MultipleLinearRegression", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 151.0, "LinesInsertedFromPrevious": 151.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186117723, "KernelVersionId": 129760477, "SourceDatasetVersionId": 1106121}]
[{"Id": 1106121, "DatasetId": 619455, "DatasourceVersionId": 1136279, "CreatorUserId": 4951964, "LicenseName": "Unknown", "CreationDate": "04/24/2020 19:41:25", "VersionNumber": 1.0, "Title": "Marketing Linear Multiple Regression", "Slug": "marketing-linear-multiple-regression", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 619455, "CreatorUserId": 4951964, "OwnerUserId": 4951964.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1106121.0, "CurrentDatasourceVersionId": 1136279.0, "ForumId": 633584, "Type": 2, "CreationDate": "04/24/2020 19:41:25", "LastActivityDate": "04/24/2020", "TotalViews": 16003, "TotalDownloads": 2578, "TotalVotes": 23, "TotalKernels": 7}]
[{"Id": 4951964, "UserName": "fayejavad", "DisplayName": "FayeJavad", "RegisterDate": "04/24/2020", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error df = pd.read_csv( "/kaggle/input/marketing-linear-multiple-regression/Marketing_Data.csv" ) df.head() df.info() df.describe() sns.pairplot(df, x_vars=df.columns[:3], y_vars=df.columns[3], height=5) sns.heatmap(df.corr(), annot=True) plt.show() data = df[["youtube", "facebook", "newspaper", "sales"]] X = data[["youtube", "facebook", "newspaper"]] y = data["sales"] X.shape y.shape y = y.values.reshape(-1, 1) y.shape lr = LinearRegression() X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=70 ) print("X_train shape: ", X_train.shape) print("X_test shape: ", X_test.shape) print("y_train shape: ", y_train.shape) print("y_test shape: ", y_test.shape) lr.fit(X_train, y_train) print("intercept: ", lr.intercept_) print("coefficients (slope): ", lr.coef_) y_pred = lr.predict(X_test) y_pred[0:5] y_test[0:5] indexes = range(1, 53) # Ground Truth fig, ax = plt.subplots(figsize=(12, 8)) ax.plot( indexes, y_test, label="Ground Truth", color="blue", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) # Prediction ax.plot( indexes, y_pred, label="Prediction", color="green", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) plt.title("GROUND TRUTH - PREDICTION") plt.xlabel("Data Index") plt.ylabel("Sales") plt.legend(loc="upper left") plt.show() indexes = range(1, 53) # Residuals fig, ax = plt.subplots(figsize=(12, 8)) ax.plot( indexes, y_test - y_pred, label="Residuals", color="purple", linewidth=2, marker=".", ms=8, mfc="red", mec="red", ) ax.plot(indexes, np.zeros(52), color="black") plt.title("RESIDUALS") plt.xlabel("Data Index") plt.ylabel("Sales") plt.legend(loc="upper left") plt.show() import math R_2 = r2_score(y_test, y_pred) mse = mean_squared_error(y_test, y_pred) RMSE = math.sqrt(mse) print("R_2: ", R_2) print("RMSE: ", RMSE) X_train_ols = sm.add_constant(X_train) sm_model = sm.OLS(y_train, X_train_ols) result = sm_model.fit() print(result.summary()) X_train_new = X_train[["youtube", "facebook"]] X_test_new = X_test[["youtube", "facebook"]] X_train_new.shape X_test_new.shape lr.fit(X_train_new, y_train) y_pred_new = lr.predict(X_test_new) y_pred_new[0:5] y_test[0:5] X_train_new_ols = sm.add_constant(X_train_new) sm_model = sm.OLS(y_train, X_train_new_ols) result = sm_model.fit() print(result.summary()) plt.title("Youtube") sns.regplot(x=data.youtube, y=data.sales) plt.show() plt.title("Facebook") sns.regplot(x=data.facebook, y=data.sales) plt.show() plt.title("Newspaper") sns.regplot(x=data.newspaper, y=data.sales) plt.show()
false
1
1,307
1
1,333
1,307
129053981
# ## **Automatidata project**
# **Course 2 - Get Started with Python**
# Welcome to the Automatidata Project!
# You have just started as a data professional in a fictional data analytics firm, Automatidata. Their client, the New York City Taxi and Limousine Commission (New York City TLC), has hired the Automatidata team for its reputation in helping their clients develop data-based solutions.
# The team is still in the early stages of the project. Previously, you were asked to complete a project proposal by your supervisor, DeShawn Washington. You have received notice that your project proposal has been approved and that New York City TLC has given the Automatidata team access to their data. To get clear insights, New York TLC's data must be analyzed, key variables identified, and the dataset ensured to be ready for analysis.
# **The purpose** of this project is to investigate and understand the data provided.
# # **The goal** is to use a dataframe constructed within Python, perform a cursory inspection of the provided dataset, and inform team members of your findings.
# # *This activity has three parts:*
# **Part 1:** Understand the situation
# * How can you best prepare to understand and organize the provided taxi cab information?
# **Part 2:** Understand the data
# * Create a pandas dataframe for data learning, future exploratory data analysis (EDA), and statistical activities.
# * Compile summary information about the data to inform next steps.
# **Part 3:** Understand the variables
# * Use insights from your examination of the summary data to guide deeper investigation into specific variables.
# # **Identify data types and relevant variables using Python**
# ### **Task 2a. Build dataframe**
# Create a pandas dataframe for data learning, exploratory data analysis (EDA), and statistical activities.
# **Code the following,**
# * import pandas as pd #library exercise for building dataframes
# * import numpy as np #numpy is imported with pandas
# * df = pd.read_csv('2017_Yellow_Taxi_Trip_Data.csv')
# **Note:** pair the data object name "df" with pandas functions to manipulate data, such as df.groupby().
# **Note:** As shown in this cell, the dataset has been automatically loaded in for you. You do not need to download the .csv file, or provide more code, in order to access the dataset and proceed with this lab. Please continue with this activity by completing the following instructions.
#
import numpy as np
import pandas as pd

# RUN THIS CELL TO IMPORT YOUR DATA.
df = pd.read_csv("2017_Yellow_Taxi_Trip_Data.csv")
print("done")
# ### **Task 2b. Understand the data - Inspect the data**
# View and inspect summary information about the dataframe by coding the following:
# 1. df.head(10)
# 2. df.info()
# 3. df.describe()
# Consider the following two questions:
# **Question 1:** When reviewing the df.info() output, what do you notice about the different variables? Are there any null values? Are all of the variables numeric? Does anything else stand out?
# **Question 2:** When reviewing the df.describe() output, what do you notice about the distributions of each variable? Are there any questionable values?
# **Responses**
# **Question 1:** After reviewing the output, I noticed that most of the values are numerical, with floats and integers, except for 3 variables which are 'object' type output. Additionally, there are no null values in this dataset.
# **Question 2:** Some variables have questionable distributions, specifically the maximum and minimum values for fare_amount and total_amount.
df.head(10)
df.info()
df.describe()
# ### **Task 2c. Understand the data - Investigate the variables**
# Sort and interpret the data table for two variables: `trip_distance` and `total_amount`.
# **Answer the following three questions:**
# **Question 1:** Sort your first variable (`trip_distance`) from maximum to minimum value; do the values seem normal?
# **Question 2:** Sort by your second variable (`total_amount`); are any values unusual?
# **Question 3:** Are the resulting rows similar for both sorts? Why or why not?
# **Responses**
# **Question 1:** The values seem normal in comparison to what was seen in the previous observations regarding the longest and shortest trips. However, after analyzing the other variables for some of these rows, some of the values don't seem to be correct.
# **Question 2:** The first two results are a bit unusual since they are much higher than most, especially the first value, which is almost 3 times higher than the second one. After taking a look at the other variables for these rows, we can also see that there are some values that seem odd.
# **Question 3:** We cannot conclude that the two highest total amount values correspond to the highest trip distances, as the results do not match. However, where the values seem to be normal, we can verify that there is an expected tendency of higher paid amounts for longer trip distances.
#
# Sort the data by trip distance from maximum to minimum value
df_sort = df.sort_values(by=["trip_distance"], ascending=False)
df_sort.head(10)
# Sort the data by total amount and print the top 20 values
total_amount_sorted = df.sort_values(by=["total_amount"], ascending=False)
total_amount_sorted.head(20)
# Sort the data by total amount and print the bottom 20 values
total_amount_sorted.tail(20)
# How many of each payment type are represented in the data?
df["payment_type"].value_counts()
# What is the average tip for trips paid for with credit card?
avg_cc_tip = df[df["payment_type"] == 1]["tip_amount"].mean()
print("Avg cc tip:", avg_cc_tip)
# What is the average tip for trips paid for with cash?
avg_cash_tip = df[df["payment_type"] == 2]["tip_amount"].mean()
print("Avg cash tip:", avg_cash_tip)
# How many times is each vendor ID represented in the data?
df["VendorID"].value_counts()
# What is the mean total amount for each vendor?
df.groupby(["VendorID"]).mean()[["total_amount"]]
# Filter the data for credit card payments only
credit_card_only = df[df["payment_type"] == 1]
credit_card_only.head(10)
# Count the passenger number for credit card payments only
credit_card_only["passenger_count"].value_counts()
# Calculate the average tip amount for each passenger count (credit card payments only)
credit_card_only.groupby(["passenger_count"]).mean()[["tip_amount"]]
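# A complementary view of tipping: tips as a percentage of the fare rather than in
# absolute dollars. A minimal sketch, assuming the fare_amount column noted in the
# responses above; zero fares are excluded to avoid division by zero.
cc_nonzero_fare = credit_card_only[credit_card_only["fare_amount"] > 0].copy()
cc_nonzero_fare["tip_pct"] = (
    100 * cc_nonzero_fare["tip_amount"] / cc_nonzero_fare["fare_amount"]
)
print("Avg credit-card tip as % of fare:", cc_nonzero_fare["tip_pct"].mean())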
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/053/129053981.ipynb
null
null
[{"Id": 129053981, "ScriptId": 38362988, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7901066, "CreationDate": "05/10/2023 15:45:38", "VersionNumber": 1.0, "Title": "Python Course Project", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 187.0, "LinesInsertedFromPrevious": 187.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## **Automatidata project**
# **Course 2 - Get Started with Python**
# Welcome to the Automatidata Project!
# You have just started as a data professional in a fictional data analytics firm, Automatidata. Their client, the New York City Taxi and Limousine Commission (New York City TLC), has hired the Automatidata team for its reputation in helping their clients develop data-based solutions.
# The team is still in the early stages of the project. Previously, you were asked to complete a project proposal by your supervisor, DeShawn Washington. You have received notice that your project proposal has been approved and that New York City TLC has given the Automatidata team access to their data. To get clear insights, New York TLC's data must be analyzed, key variables identified, and the dataset ensured to be ready for analysis.
# **The purpose** of this project is to investigate and understand the data provided.
# # **The goal** is to use a dataframe constructed within Python, perform a cursory inspection of the provided dataset, and inform team members of your findings.
# # *This activity has three parts:*
# **Part 1:** Understand the situation
# * How can you best prepare to understand and organize the provided taxi cab information?
# **Part 2:** Understand the data
# * Create a pandas dataframe for data learning, future exploratory data analysis (EDA), and statistical activities.
# * Compile summary information about the data to inform next steps.
# **Part 3:** Understand the variables
# * Use insights from your examination of the summary data to guide deeper investigation into specific variables.
# # **Identify data types and relevant variables using Python**
# ### **Task 2a. Build dataframe**
# Create a pandas dataframe for data learning, exploratory data analysis (EDA), and statistical activities.
# **Code the following,**
# * import pandas as pd #library exercise for building dataframes
# * import numpy as np #numpy is imported with pandas
# * df = pd.read_csv('2017_Yellow_Taxi_Trip_Data.csv')
# **Note:** pair the data object name "df" with pandas functions to manipulate data, such as df.groupby().
# **Note:** As shown in this cell, the dataset has been automatically loaded in for you. You do not need to download the .csv file, or provide more code, in order to access the dataset and proceed with this lab. Please continue with this activity by completing the following instructions.
#
import numpy as np
import pandas as pd

# RUN THIS CELL TO IMPORT YOUR DATA.
df = pd.read_csv("2017_Yellow_Taxi_Trip_Data.csv")
print("done")
# ### **Task 2b. Understand the data - Inspect the data**
# View and inspect summary information about the dataframe by coding the following:
# 1. df.head(10)
# 2. df.info()
# 3. df.describe()
# Consider the following two questions:
# **Question 1:** When reviewing the df.info() output, what do you notice about the different variables? Are there any null values? Are all of the variables numeric? Does anything else stand out?
# **Question 2:** When reviewing the df.describe() output, what do you notice about the distributions of each variable? Are there any questionable values?
# **Responses**
# **Question 1:** After reviewing the output, I noticed that most of the values are numerical, with floats and integers, except for 3 variables which are 'object' type output. Additionally, there are no null values in this dataset.
# **Question 2:** Some variables have questionable distributions, specifically the maximum and minimum values for fare_amount and total_amount.
df.head(10)
df.info()
df.describe()
# ### **Task 2c. Understand the data - Investigate the variables**
# Sort and interpret the data table for two variables: `trip_distance` and `total_amount`.
# **Answer the following three questions:**
# **Question 1:** Sort your first variable (`trip_distance`) from maximum to minimum value; do the values seem normal?
# **Question 2:** Sort by your second variable (`total_amount`); are any values unusual?
# **Question 3:** Are the resulting rows similar for both sorts? Why or why not?
# **Responses**
# **Question 1:** The values seem normal in comparison to what was seen in the previous observations regarding the longest and shortest trips. However, after analyzing the other variables for some of these rows, some of the values don't seem to be correct.
# **Question 2:** The first two results are a bit unusual since they are much higher than most, especially the first value, which is almost 3 times higher than the second one. After taking a look at the other variables for these rows, we can also see that there are some values that seem odd.
# **Question 3:** We cannot conclude that the two highest total amount values correspond to the highest trip distances, as the results do not match. However, where the values seem to be normal, we can verify that there is an expected tendency of higher paid amounts for longer trip distances.
#
# Sort the data by trip distance from maximum to minimum value
df_sort = df.sort_values(by=["trip_distance"], ascending=False)
df_sort.head(10)
# Sort the data by total amount and print the top 20 values
total_amount_sorted = df.sort_values(by=["total_amount"], ascending=False)
total_amount_sorted.head(20)
# Sort the data by total amount and print the bottom 20 values
total_amount_sorted.tail(20)
# How many of each payment type are represented in the data?
df["payment_type"].value_counts()
# What is the average tip for trips paid for with credit card?
avg_cc_tip = df[df["payment_type"] == 1]["tip_amount"].mean()
print("Avg cc tip:", avg_cc_tip)
# What is the average tip for trips paid for with cash?
avg_cash_tip = df[df["payment_type"] == 2]["tip_amount"].mean()
print("Avg cash tip:", avg_cash_tip)
# How many times is each vendor ID represented in the data?
df["VendorID"].value_counts()
# What is the mean total amount for each vendor?
df.groupby(["VendorID"]).mean()[["total_amount"]]
# Filter the data for credit card payments only
credit_card_only = df[df["payment_type"] == 1]
credit_card_only.head(10)
# Count the passenger number for credit card payments only
credit_card_only["passenger_count"].value_counts()
# Calculate the average tip amount for each passenger count (credit card payments only)
credit_card_only.groupby(["passenger_count"]).mean()[["tip_amount"]]
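# If the timestamp columns follow the usual 2017 yellow-taxi schema
# (tpep_pickup_datetime / tpep_dropoff_datetime -- an assumption here, since the
# column list is not shown), trip duration can be derived as in this minimal sketch:
for col in ["tpep_pickup_datetime", "tpep_dropoff_datetime"]:  # assumed names; adjust to the actual schema
    df[col] = pd.to_datetime(df[col])
df["duration_min"] = (
    df["tpep_dropoff_datetime"] - df["tpep_pickup_datetime"]
).dt.total_seconds() / 60
df["duration_min"].describe()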
false
0
1,608
0
1,608
1,608
129053197
# ### Course : 2CSDE61 Deep Learning # ### Roll No : 20BCE073 KUNJ GANDHI # ### Practical : 9 # ### Use Recurrent Neural Network/LSTM for Sentiment Analysis (IMDB Review) import pandas as pd import numpy as np # already preprocessed in practical 6b df = pd.read_csv("../input/processedimdb/processedIMDB.csv") df.drop(columns=["Unnamed: 0"], axis=1, inplace=True) df.head() df.drop_duplicates(inplace=True) df.shape df["sentiment"].value_counts() from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from keras.layers import LSTM, Embedding, Dense, Dropout from keras.models import Sequential from nltk.tokenize import word_tokenize corpus = [] for sentence in df["review"].to_numpy(): words = word_tokenize(sentence) corpus.append(words) len(corpus) X = df["preprocessed_review"].to_numpy() y = df["sentiment"].to_numpy() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=40, stratify=y ) X_train.shape, y_train.shape X_test.shape, y_test.shape tokenizer = Tokenizer() tokenizer.document_count tokenizer.fit_on_texts(X_train) # creates vocabulary tokenizer.document_count len(tokenizer.word_index) X_train_num = tokenizer.texts_to_sequences( X_train ) # creates numeric data from textual based on word index print(X_train[0]) print(" ".join(map(str, X_train_num[0]))) # creating word from tokenized index for idx in X_train_num[0]: print(tokenizer.index_word[idx], end=" ") # finding maxlen of sentence val = 0 for sent in X_train: val = max(val, len(word_tokenize(sent))) print(val) # 1456 is too huge, keeping 250 as maxlen X_train_pad = pad_sequences(X_train_num, maxlen=250, padding="post", truncating="post") X_train_pad.shape X_test_num = tokenizer.texts_to_sequences(X_test) X_test_pad = pad_sequences(X_test_num, maxlen=250, padding="post", truncating="post") X_test_pad.shape vocab_size = len(tokenizer.word_index) + 1 vocab_size # model model = Sequential() # always use keras tokenizer if using embedding layer model.add( Embedding(input_dim=vocab_size, output_dim=100, input_length=250) ) # Turns positive integers (indexes) into dense vectors of fixed size. 
model.add(LSTM(100, dropout=0.2, return_sequences=True))  # Stacked LSTM
model.add(LSTM(50, dropout=0.2))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
history = model.fit(X_train_pad, y_train, validation_split=0.2, epochs=10, verbose=1)
from matplotlib import pyplot as plt

train_loss = history.history["loss"]
train_acc = history.history["accuracy"]
val_loss = history.history["val_loss"]
val_acc = history.history["val_accuracy"]
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.plot(np.arange(1, 11), train_loss, label="Training Loss")
plt.plot(np.arange(1, 11), val_loss, label="Validation Loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.title("loss vs epochs")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(np.arange(1, 11), train_acc, label="Training Accuracy")
plt.plot(np.arange(1, 11), val_acc, label="Validation Accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.title("accuracy vs epochs")
plt.legend()
plt.show()
# ### EVALUATION
scores = model.evaluate(X_test_pad, y_test)
print(f"Test Loss : {scores[0]}, Test Accuracy : {scores[1]*100} %")
import pickle

with open("tokenizer_senti.pkl", "wb") as f:
    pickle.dump(tokenizer, f)
model.save("sentiment.h5")
# ### TESTING
from bs4 import BeautifulSoup
import contractions
import string
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

# stop words and punctuation removal
stop_words = set(stopwords.words("english"))
punc = set(string.punctuation)
stop_words.remove("not")
stop_words.remove("against")  # removing those stopwords which might be important
punc.add("`")


def preprocessing(doc):
    # preprocessing a single document
    # since there are html tags in reviews we need to remove them
    soup = BeautifulSoup(doc, "html.parser")
    doc = soup.get_text()
    # expanding contractions, e.g. we'll -> we will
    doc = contractions.fix(doc)
    # tokenize
    words = word_tokenize(doc)
    # removing punctuation or stopwords
    words = [i.strip().lower() for i in words]
    words = [i for i in words if i not in stop_words and i not in punc]
    # lemmatization
    wnl = WordNetLemmatizer()
    lemma = [wnl.lemmatize(i) for i in words]
    doc = " ".join(lemma)
    return doc


prediction = lambda x: "Positive review" if x >= 0.5 else "Negative review"
sentiment1 = "This movie is very bad i don't like it at all, very boring movie"
sentiment2 = "The movie is outstanding, i really loved the movie, the actors did an excellent job in portraying their characters, especially the villain"
pre_sentiment1 = preprocessing(sentiment1)
pre_sentiment2 = preprocessing(sentiment2)
s1 = tokenizer.texts_to_sequences([pre_sentiment1])
s1 = pad_sequences(s1, maxlen=250, padding="post", truncating="post")
s2 = tokenizer.texts_to_sequences([pre_sentiment2])
s2 = pad_sequences(s2, maxlen=250, padding="post", truncating="post")
pred1 = model.predict(s1)
pred2 = model.predict(s2)
print(
    f"Original review : {sentiment1} \nProcessed review : {pre_sentiment1} \nPrediction : {prediction(pred1)}"
)
print(
    f"\nOriginal review : {sentiment2} \nProcessed review : {pre_sentiment2} \nPrediction : {prediction(pred2)}"
)
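# Reloading the saved artifacts is a useful end-to-end check that inference works
# outside the training session. A minimal sketch, assuming the sentiment.h5 model
# and tokenizer_senti.pkl file written above:
from keras.models import load_model

with open("tokenizer_senti.pkl", "rb") as f:
    loaded_tokenizer = pickle.load(f)
loaded_model = load_model("sentiment.h5")
sample = preprocessing("an absolutely wonderful film, the cast was brilliant")
seq = pad_sequences(
    loaded_tokenizer.texts_to_sequences([sample]),
    maxlen=250,
    padding="post",
    truncating="post",
)
print(prediction(loaded_model.predict(seq)))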
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/053/129053197.ipynb
null
null
[{"Id": 129053197, "ScriptId": 38361000, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9842779, "CreationDate": "05/10/2023 15:39:29", "VersionNumber": 1.0, "Title": "IMDB Sentiment Analysis Using LSTM", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 192.0, "LinesInsertedFromPrevious": 192.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ### Course : 2CSDE61 Deep Learning # ### Roll No : 20BCE073 KUNJ GANDHI # ### Practical : 9 # ### Use Recurrent Neural Network/LSTM for Sentiment Analysis (IMDB Review) import pandas as pd import numpy as np # already preprocessed in practical 6b df = pd.read_csv("../input/processedimdb/processedIMDB.csv") df.drop(columns=["Unnamed: 0"], axis=1, inplace=True) df.head() df.drop_duplicates(inplace=True) df.shape df["sentiment"].value_counts() from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from keras.layers import LSTM, Embedding, Dense, Dropout from keras.models import Sequential from nltk.tokenize import word_tokenize corpus = [] for sentence in df["review"].to_numpy(): words = word_tokenize(sentence) corpus.append(words) len(corpus) X = df["preprocessed_review"].to_numpy() y = df["sentiment"].to_numpy() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=40, stratify=y ) X_train.shape, y_train.shape X_test.shape, y_test.shape tokenizer = Tokenizer() tokenizer.document_count tokenizer.fit_on_texts(X_train) # creates vocabulary tokenizer.document_count len(tokenizer.word_index) X_train_num = tokenizer.texts_to_sequences( X_train ) # creates numeric data from textual based on word index print(X_train[0]) print(" ".join(map(str, X_train_num[0]))) # creating word from tokenized index for idx in X_train_num[0]: print(tokenizer.index_word[idx], end=" ") # finding maxlen of sentence val = 0 for sent in X_train: val = max(val, len(word_tokenize(sent))) print(val) # 1456 is too huge, keeping 250 as maxlen X_train_pad = pad_sequences(X_train_num, maxlen=250, padding="post", truncating="post") X_train_pad.shape X_test_num = tokenizer.texts_to_sequences(X_test) X_test_pad = pad_sequences(X_test_num, maxlen=250, padding="post", truncating="post") X_test_pad.shape vocab_size = len(tokenizer.word_index) + 1 vocab_size # model model = Sequential() # always use keras tokenizer if using embedding layer model.add( Embedding(input_dim=vocab_size, output_dim=100, input_length=250) ) # Turns positive integers (indexes) into dense vectors of fixed size. 
model.add(LSTM(100, dropout=0.2, return_sequences=True))  # Stacked LSTM
model.add(LSTM(50, dropout=0.2))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
history = model.fit(X_train_pad, y_train, validation_split=0.2, epochs=10, verbose=1)
from matplotlib import pyplot as plt

train_loss = history.history["loss"]
train_acc = history.history["accuracy"]
val_loss = history.history["val_loss"]
val_acc = history.history["val_accuracy"]
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.plot(np.arange(1, 11), train_loss, label="Training Loss")
plt.plot(np.arange(1, 11), val_loss, label="Validation Loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.title("loss vs epochs")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(np.arange(1, 11), train_acc, label="Training Accuracy")
plt.plot(np.arange(1, 11), val_acc, label="Validation Accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.title("accuracy vs epochs")
plt.legend()
plt.show()
# ### EVALUATION
scores = model.evaluate(X_test_pad, y_test)
print(f"Test Loss : {scores[0]}, Test Accuracy : {scores[1]*100} %")
import pickle

with open("tokenizer_senti.pkl", "wb") as f:
    pickle.dump(tokenizer, f)
model.save("sentiment.h5")
# ### TESTING
from bs4 import BeautifulSoup
import contractions
import string
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

# stop words and punctuation removal
stop_words = set(stopwords.words("english"))
punc = set(string.punctuation)
stop_words.remove("not")
stop_words.remove("against")  # removing those stopwords which might be important
punc.add("`")


def preprocessing(doc):
    # preprocessing a single document
    # since there are html tags in reviews we need to remove them
    soup = BeautifulSoup(doc, "html.parser")
    doc = soup.get_text()
    # expanding contractions, e.g. we'll -> we will
    doc = contractions.fix(doc)
    # tokenize
    words = word_tokenize(doc)
    # removing punctuation or stopwords
    words = [i.strip().lower() for i in words]
    words = [i for i in words if i not in stop_words and i not in punc]
    # lemmatization
    wnl = WordNetLemmatizer()
    lemma = [wnl.lemmatize(i) for i in words]
    doc = " ".join(lemma)
    return doc


prediction = lambda x: "Positive review" if x >= 0.5 else "Negative review"
sentiment1 = "This movie is very bad i don't like it at all, very boring movie"
sentiment2 = "The movie is outstanding, i really loved the movie, the actors did an excellent job in portraying their characters, especially the villain"
pre_sentiment1 = preprocessing(sentiment1)
pre_sentiment2 = preprocessing(sentiment2)
s1 = tokenizer.texts_to_sequences([pre_sentiment1])
s1 = pad_sequences(s1, maxlen=250, padding="post", truncating="post")
s2 = tokenizer.texts_to_sequences([pre_sentiment2])
s2 = pad_sequences(s2, maxlen=250, padding="post", truncating="post")
pred1 = model.predict(s1)
pred2 = model.predict(s2)
print(
    f"Original review : {sentiment1} \nProcessed review : {pre_sentiment1} \nPrediction : {prediction(pred1)}"
)
print(
    f"\nOriginal review : {sentiment2} \nProcessed review : {pre_sentiment2} \nPrediction : {prediction(pred2)}"
)
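# Training for a fixed 10 epochs can overfit once validation loss stops improving.
# A common alternative is an EarlyStopping callback that restores the best weights;
# this is a minimal sketch, assuming the same model and padded data as above:
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor="val_loss", patience=2, restore_best_weights=True)
history = model.fit(
    X_train_pad,
    y_train,
    validation_split=0.2,
    epochs=20,  # upper bound; training stops earlier if val_loss stalls for 2 epochs
    callbacks=[early_stop],
    verbose=1,
)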
false
0
1,796
0
1,796
1,796
129053572
<jupyter_start><jupyter_text>Insurance Premium Data
This Dataset is something I found online when I wanted to practice regression models. It is an openly available online dataset at multiple places. Though I do not know the exact origin and collection methodology of the data, I would recommend this dataset to everybody who is just beginning their journey in Data science.
Kaggle dataset identifier: insurance
<jupyter_code>import pandas as pd
df = pd.read_csv('insurance/insurance.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1338 entries, 0 to 1337
Data columns (total 7 columns):
 #   Column    Non-Null Count  Dtype
---  ------    --------------  -----
 0   age       1338 non-null   int64
 1   sex       1338 non-null   object
 2   bmi       1338 non-null   float64
 3   children  1338 non-null   int64
 4   smoker    1338 non-null   object
 5   region    1338 non-null   object
 6   charges   1338 non-null   float64
dtypes: float64(2), int64(2), object(3)
memory usage: 73.3+ KB
<jupyter_text>Examples:
{ "age": 19, "sex": "female", "bmi": 27.9, "children": 0, "smoker": "yes", "region": "southwest", "charges": 16884.924 }
{ "age": 18, "sex": "male", "bmi": 33.77, "children": 1, "smoker": "no", "region": "southeast", "charges": 1725.5523 }
{ "age": 28, "sex": "male", "bmi": 33.0, "children": 3, "smoker": "no", "region": "southeast", "charges": 4449.462 }
{ "age": 33, "sex": "male", "bmi": 22.705, "children": 0, "smoker": "no", "region": "northwest", "charges": 21984.47061 }
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from argparse import Namespace
from sklearn.preprocessing import scale
from sklearn import preprocessing

df = pd.read_csv("/kaggle/input/insurance/insurance.csv")
df.info()
df.head()
df.shape
df.isnull().sum()
df.dtypes
df.describe().T
df["age"].unique()
df["children"].unique()
df["bmi"].unique()
df["region"].unique()
df["smoker"].value_counts()
df["sex"].value_counts()
df["children"].value_counts()
df["region"].value_counts()
df["age"].describe()
sns.violinplot(df["age"])
sns.histplot(df["age"], bins=20)
sns.boxplot(df["age"])
df["bmi"].describe()
sns.violinplot(df["bmi"])
sns.histplot(df["bmi"], bins=20)
sns.boxplot(df["bmi"])
sns.violinplot(df["charges"])
sns.histplot(df["charges"], bins=15)
sns.boxplot(df["charges"])
# continuous distribution analysis of age and charges
sns.scatterplot(data=df, x="age", y="charges", alpha=0.7)
sns.jointplot(data=df, x="age", y="charges", kind="reg", height=9)
sns.scatterplot(data=df, x="age", y="charges", hue="sex")
sns.scatterplot(data=df, x="age", y="charges", hue="region")
sns.scatterplot(data=df, x="age", y="charges", hue="children")
sns.catplot(x="sex", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="sex", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="smoker", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="smoker", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="children", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="children", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="region", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="region", y="age", data=df, height=8, alpha=0.9)
df.groupby(by="sex").describe()["charges"]
df.groupby(by="smoker").describe()["charges"]
df.groupby(by="region").describe()["charges"]
df.groupby(by="children").describe()["charges"]
pd.pivot_table(df, values="charges", index=["smoker"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["sex"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["children"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["region"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["smoker", "sex"], aggfunc=np.mean)
pd.pivot_table(
    df, values="charges", index=["smoker", "children"], columns=["sex"], aggfunc=np.mean
)
pd.pivot_table(
    df, values="charges", index=["smoker", "region"], columns=["sex"], aggfunc=np.mean
)
pd.pivot_table(
    df,
    values="charges",
    index=["region", "children"],
    columns=["sex", "smoker"],
    aggfunc=np.mean,
)
le = preprocessing.LabelEncoder()
df["sex"] = le.fit_transform(df["sex"])
df.head()
df["smoker"] = le.fit_transform(df["smoker"])
df.head()
dfRegion = pd.get_dummies(df["region"])
dfRegion.head()
dfRegion.columns = ["Izmir", "Bursa", "Ankara", "Istanbul"]
dfRegion.head()
df = pd.concat([df, dfRegion], axis=1)
df.drop(["Bursa", "region"], axis=1, inplace=True)
df.head()
df.head()
age = df[["age"]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
age_scaled = min_max_scaler.fit_transform(age)
df["age_scaled"] = pd.DataFrame(age_scaled)
bmi = df[["bmi"]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
bmi_scaled = min_max_scaler.fit_transform(bmi)
df["bmi_scaled"] = pd.DataFrame(bmi_scaled)
df.head()
df.drop(["age", "bmi"], axis=1, inplace=True)
df.head()
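# One caveat with the scaling above: MinMaxScaler was fit on the full dataset,
# which leaks test-set information if a model is trained later. A minimal sketch
# of the leakage-free pattern, assuming charges is the modeling target:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

X = df.drop("charges", axis=1)
y = df["charges"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)  # fit on the training split only
X_test_scaled = scaler.transform(X_test)  # reuse the training statistics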
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/053/129053572.ipynb
insurance
simranjain17
[{"Id": 129053572, "ScriptId": 38357409, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14530120, "CreationDate": "05/10/2023 15:42:29", "VersionNumber": 1.0, "Title": "Kesifsel Veri Analizi: Sigorta Senaryosu", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 173.0, "LinesInsertedFromPrevious": 173.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184771267, "KernelVersionId": 129053572, "SourceDatasetVersionId": 1474547}]
[{"Id": 1474547, "DatasetId": 865278, "DatasourceVersionId": 1508337, "CreatorUserId": 1396467, "LicenseName": "CC0: Public Domain", "CreationDate": "09/09/2020 08:34:47", "VersionNumber": 1.0, "Title": "Insurance Premium Data", "Slug": "insurance", "Subtitle": "Health Insurance Premium charges based on Gender, BMI and other characteristics", "Description": "This Dataset is something I found online when I wanted to practice regression models. It is an openly available online dataset at multiple places. Though I do not know the exact origin and collection methodology of the data, I would recommend this dataset to everybody who is just beginning their journey in Data science.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 865278, "CreatorUserId": 1396467, "OwnerUserId": 1396467.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1474547.0, "CurrentDatasourceVersionId": 1508337.0, "ForumId": 880634, "Type": 2, "CreationDate": "09/09/2020 08:34:47", "LastActivityDate": "09/09/2020", "TotalViews": 36653, "TotalDownloads": 4945, "TotalVotes": 64, "TotalKernels": 21}]
[{"Id": 1396467, "UserName": "simranjain17", "DisplayName": "Simran Jain", "RegisterDate": "11/07/2017", "PerformanceTier": 2}]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from argparse import Namespace
from sklearn.preprocessing import scale
from sklearn import preprocessing

df = pd.read_csv("/kaggle/input/insurance/insurance.csv")
df.info()
df.head()
df.shape
df.isnull().sum()
df.dtypes
df.describe().T
df["age"].unique()
df["children"].unique()
df["bmi"].unique()
df["region"].unique()
df["smoker"].value_counts()
df["sex"].value_counts()
df["children"].value_counts()
df["region"].value_counts()
df["age"].describe()
sns.violinplot(df["age"])
sns.histplot(df["age"], bins=20)
sns.boxplot(df["age"])
df["bmi"].describe()
sns.violinplot(df["bmi"])
sns.histplot(df["bmi"], bins=20)
sns.boxplot(df["bmi"])
sns.violinplot(df["charges"])
sns.histplot(df["charges"], bins=15)
sns.boxplot(df["charges"])
# continuous distribution analysis of age and charges
sns.scatterplot(data=df, x="age", y="charges", alpha=0.7)
sns.jointplot(data=df, x="age", y="charges", kind="reg", height=9)
sns.scatterplot(data=df, x="age", y="charges", hue="sex")
sns.scatterplot(data=df, x="age", y="charges", hue="region")
sns.scatterplot(data=df, x="age", y="charges", hue="children")
sns.catplot(x="sex", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="sex", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="smoker", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="smoker", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="children", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="children", y="age", data=df, height=8, alpha=0.9)
sns.catplot(x="region", y="age", data=df, height=8, alpha=0.9)
sns.violinplot(x="region", y="age", data=df, height=8, alpha=0.9)
df.groupby(by="sex").describe()["charges"]
df.groupby(by="smoker").describe()["charges"]
df.groupby(by="region").describe()["charges"]
df.groupby(by="children").describe()["charges"]
pd.pivot_table(df, values="charges", index=["smoker"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["sex"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["children"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["region"], aggfunc=np.mean)
pd.pivot_table(df, values="charges", index=["smoker", "sex"], aggfunc=np.mean)
pd.pivot_table(
    df, values="charges", index=["smoker", "children"], columns=["sex"], aggfunc=np.mean
)
pd.pivot_table(
    df, values="charges", index=["smoker", "region"], columns=["sex"], aggfunc=np.mean
)
pd.pivot_table(
    df,
    values="charges",
    index=["region", "children"],
    columns=["sex", "smoker"],
    aggfunc=np.mean,
)
le = preprocessing.LabelEncoder()
df["sex"] = le.fit_transform(df["sex"])
df.head()
df["smoker"] = le.fit_transform(df["smoker"])
df.head()
dfRegion = pd.get_dummies(df["region"])
dfRegion.head()
dfRegion.columns = ["Izmir", "Bursa", "Ankara", "Istanbul"]
dfRegion.head()
df = pd.concat([df, dfRegion], axis=1)
df.drop(["Bursa", "region"], axis=1, inplace=True)
df.head()
df.head()
age = df[["age"]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
age_scaled = min_max_scaler.fit_transform(age)
df["age_scaled"] = pd.DataFrame(age_scaled)
bmi = df[["bmi"]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
bmi_scaled = min_max_scaler.fit_transform(bmi)
df["bmi_scaled"] = pd.DataFrame(bmi_scaled)
df.head()
df.drop(["age", "bmi"], axis=1, inplace=True)
df.head()
[{"insurance/insurance.csv": {"column_names": "[\"age\", \"sex\", \"bmi\", \"children\", \"smoker\", \"region\", \"charges\"]", "column_data_types": "{\"age\": \"int64\", \"sex\": \"object\", \"bmi\": \"float64\", \"children\": \"int64\", \"smoker\": \"object\", \"region\": \"object\", \"charges\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1338 entries, 0 to 1337\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 1338 non-null int64 \n 1 sex 1338 non-null object \n 2 bmi 1338 non-null float64\n 3 children 1338 non-null int64 \n 4 smoker 1338 non-null object \n 5 region 1338 non-null object \n 6 charges 1338 non-null float64\ndtypes: float64(2), int64(2), object(3)\nmemory usage: 73.3+ KB\n", "summary": "{\"age\": {\"count\": 1338.0, \"mean\": 39.20702541106129, \"std\": 14.049960379216154, \"min\": 18.0, \"25%\": 27.0, \"50%\": 39.0, \"75%\": 51.0, \"max\": 64.0}, \"bmi\": {\"count\": 1338.0, \"mean\": 30.66339686098655, \"std\": 6.098186911679014, \"min\": 15.96, \"25%\": 26.29625, \"50%\": 30.4, \"75%\": 34.69375, \"max\": 53.13}, \"children\": {\"count\": 1338.0, \"mean\": 1.0949177877429, \"std\": 1.205492739781914, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 5.0}, \"charges\": {\"count\": 1338.0, \"mean\": 13270.422265141257, \"std\": 12110.011236694001, \"min\": 1121.8739, \"25%\": 4740.28715, \"50%\": 9382.033, \"75%\": 16639.912515, \"max\": 63770.42801}}", "examples": "{\"age\":{\"0\":19,\"1\":18,\"2\":28,\"3\":33},\"sex\":{\"0\":\"female\",\"1\":\"male\",\"2\":\"male\",\"3\":\"male\"},\"bmi\":{\"0\":27.9,\"1\":33.77,\"2\":33.0,\"3\":22.705},\"children\":{\"0\":0,\"1\":1,\"2\":3,\"3\":0},\"smoker\":{\"0\":\"yes\",\"1\":\"no\",\"2\":\"no\",\"3\":\"no\"},\"region\":{\"0\":\"southwest\",\"1\":\"southeast\",\"2\":\"southeast\",\"3\":\"northwest\"},\"charges\":{\"0\":16884.924,\"1\":1725.5523,\"2\":4449.462,\"3\":21984.47061}}"}}]
true
1
<start_data_description><data_path>insurance/insurance.csv: <column_names> ['age', 'sex', 'bmi', 'children', 'smoker', 'region', 'charges'] <column_types> {'age': 'int64', 'sex': 'object', 'bmi': 'float64', 'children': 'int64', 'smoker': 'object', 'region': 'object', 'charges': 'float64'} <dataframe_Summary> {'age': {'count': 1338.0, 'mean': 39.20702541106129, 'std': 14.049960379216154, 'min': 18.0, '25%': 27.0, '50%': 39.0, '75%': 51.0, 'max': 64.0}, 'bmi': {'count': 1338.0, 'mean': 30.66339686098655, 'std': 6.098186911679014, 'min': 15.96, '25%': 26.29625, '50%': 30.4, '75%': 34.69375, 'max': 53.13}, 'children': {'count': 1338.0, 'mean': 1.0949177877429, 'std': 1.205492739781914, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 5.0}, 'charges': {'count': 1338.0, 'mean': 13270.422265141257, 'std': 12110.011236694001, 'min': 1121.8739, '25%': 4740.28715, '50%': 9382.033, '75%': 16639.912515, 'max': 63770.42801}} <dataframe_info> RangeIndex: 1338 entries, 0 to 1337 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 1338 non-null int64 1 sex 1338 non-null object 2 bmi 1338 non-null float64 3 children 1338 non-null int64 4 smoker 1338 non-null object 5 region 1338 non-null object 6 charges 1338 non-null float64 dtypes: float64(2), int64(2), object(3) memory usage: 73.3+ KB <some_examples> {'age': {'0': 19, '1': 18, '2': 28, '3': 33}, 'sex': {'0': 'female', '1': 'male', '2': 'male', '3': 'male'}, 'bmi': {'0': 27.9, '1': 33.77, '2': 33.0, '3': 22.705}, 'children': {'0': 0, '1': 1, '2': 3, '3': 0}, 'smoker': {'0': 'yes', '1': 'no', '2': 'no', '3': 'no'}, 'region': {'0': 'southwest', '1': 'southeast', '2': 'southeast', '3': 'northwest'}, 'charges': {'0': 16884.924, '1': 1725.5523, '2': 4449.462, '3': 21984.47061}} <end_description>
1,417
0
2,006
1,417
129053835
<jupyter_start><jupyter_text>Breast Cancer Wisconsin (Diagnostic) Data Set
Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
n the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
This database is also available through the UW CS ftp server:
ftp ftp.cs.wisc.edu
cd math-prog/cpo-dataset/machine-learn/WDBC/
Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
Attribute Information:
1) ID number
2) Diagnosis (M = malignant, B = benign)
3-32)
Ten real-valued features are computed for each cell nucleus:
a) radius (mean of distances from center to points on the perimeter)
b) texture (standard deviation of gray-scale values)
c) perimeter
d) area
e) smoothness (local variation in radius lengths)
f) compactness (perimeter^2 / area - 1.0)
g) concavity (severity of concave portions of the contour)
h) concave points (number of concave portions of the contour)
i) symmetry
j) fractal dimension ("coastline approximation" - 1)
The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius.
All feature values are recoded with four significant digits.
Missing attribute values: none
Class distribution: 357 benign, 212 malignant
Kaggle dataset identifier: breast-cancer-wisconsin-data
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.io as pio

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import time
from subprocess import check_output

print(check_output(["ls", "../input"]).decode("utf8"))
# import warnings library
import warnings
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# ignore all warnings
warnings.filterwarnings("ignore")
# Any results you write to the current directory are saved as output.
# Data Content
# 1. ID number
# 2. Diagnosis (M = malignant, B = benign)
# 3. radius (mean of distances from center to points on the perimeter)
# 4. texture (standard deviation of gray-scale values)
# 5. perimeter
# 6. area
# 7. smoothness (local variation in radius lengths)
# 8. compactness (perimeter^2 / area - 1.0)
# 9. concavity (severity of concave portions of the contour)
# 10. concave points (number of concave portions of the contour)
# 11. symmetry
# 12. fractal dimension ("coastline approximation" - 1)
# 13. The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius.
# 14. All feature values are recoded with four significant digits.
# 15. Missing attribute values: none
# 16. Class distribution: 357 benign, 212 malignant
data = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
data.head()
# There are 4 things that catch my attention: 1) there is an id, which cannot be used for classification; 2) diagnosis is our class label; 3) the Unnamed: 32 feature contains NaN, so we do not need it; 4) I have no idea what the other feature names mean, and actually I do not need to, because machine learning is great :)
# Therefore, drop these unnecessary features. But note that this is not feature selection. It is like glancing around a pub; we are not choosing our drink yet!!!
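# A quick check of the two observations above (the class label counts and the
# all-NaN column) can be run directly on the raw frame. A minimal sketch using
# the data frame just loaded:
print(data["diagnosis"].value_counts())  # expected: 357 B, 212 M
print(data["Unnamed: 32"].isna().sum())  # expected: NaN in every row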
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/053/129053835.ipynb
breast-cancer-wisconsin-data
null
[{"Id": 129053835, "ScriptId": 38361348, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9683291, "CreationDate": "05/10/2023 15:44:29", "VersionNumber": 1.0, "Title": "Breast Cancer Wisconsin Feature Selection and CNN", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 103.0, "LinesInsertedFromPrevious": 103.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184771848, "KernelVersionId": 129053835, "SourceDatasetVersionId": 408}]
[{"Id": 408, "DatasetId": 180, "DatasourceVersionId": 408, "CreatorUserId": 711301, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "09/25/2016 10:49:04", "VersionNumber": 2.0, "Title": "Breast Cancer Wisconsin (Diagnostic) Data Set", "Slug": "breast-cancer-wisconsin-data", "Subtitle": "Predict whether the cancer is benign or malignant", "Description": "Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. \nn the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: \"Robust Linear Programming Discrimination of Two Linearly Inseparable Sets\", Optimization Methods and Software 1, 1992, 23-34]. \n\nThis database is also available through the UW CS ftp server: \nftp ftp.cs.wisc.edu \ncd math-prog/cpo-dataset/machine-learn/WDBC/\n\nAlso can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29\n\nAttribute Information:\n\n1) ID number \n2) Diagnosis (M = malignant, B = benign) \n3-32) \n\nTen real-valued features are computed for each cell nucleus: \n\na) radius (mean of distances from center to points on the perimeter) \nb) texture (standard deviation of gray-scale values) \nc) perimeter \nd) area \ne) smoothness (local variation in radius lengths) \nf) compactness (perimeter^2 / area - 1.0) \ng) concavity (severity of concave portions of the contour) \nh) concave points (number of concave portions of the contour) \ni) symmetry \nj) fractal dimension (\"coastline approximation\" - 1)\n\nThe mean, standard error and \"worst\" or largest (mean of the three\nlargest values) of these features were computed for each image,\nresulting in 30 features. For instance, field 3 is Mean Radius, field\n13 is Radius SE, field 23 is Worst Radius.\n\nAll feature values are recoded with four significant digits.\n\nMissing attribute values: none\n\nClass distribution: 357 benign, 212 malignant", "VersionNotes": "This updated dataset has column names added", "TotalCompressedBytes": 125204.0, "TotalUncompressedBytes": 125204.0}]
[{"Id": 180, "CreatorUserId": 711301, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 408.0, "CurrentDatasourceVersionId": 408.0, "ForumId": 1547, "Type": 2, "CreationDate": "09/19/2016 20:27:05", "LastActivityDate": "02/06/2018", "TotalViews": 1744898, "TotalDownloads": 301790, "TotalVotes": 3191, "TotalKernels": 2628}]
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # data visualization library
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.io as pio

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import time
from subprocess import check_output

print(check_output(["ls", "../input"]).decode("utf8"))
# import warnings library
import warnings
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# ignore all warnings
warnings.filterwarnings("ignore")
# Any results you write to the current directory are saved as output.
# Data Content
# 1. ID number
# 2. Diagnosis (M = malignant, B = benign)
# 3. radius (mean of distances from center to points on the perimeter)
# 4. texture (standard deviation of gray-scale values)
# 5. perimeter
# 6. area
# 7. smoothness (local variation in radius lengths)
# 8. compactness (perimeter^2 / area - 1.0)
# 9. concavity (severity of concave portions of the contour)
# 10. concave points (number of concave portions of the contour)
# 11. symmetry
# 12. fractal dimension ("coastline approximation" - 1)
# 13. The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius.
# 14. All feature values are recoded with four significant digits.
# 15. Missing attribute values: none
# 16. Class distribution: 357 benign, 212 malignant
data = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
data.head()
# There are 4 things that catch my attention: 1) there is an id, which cannot be used for classification; 2) diagnosis is our class label; 3) the Unnamed: 32 feature contains NaN, so we do not need it; 4) I have no idea what the other feature names mean, and actually I do not need to, because machine learning is great :)
# Therefore, drop these unnecessary features. But note that this is not feature selection. It is like glancing around a pub; we are not choosing our drink yet!!!
# feature names as a list
col = data.columns  # .columns gives columns names in data
print(col)
# y includes our labels and x includes our features
y = data.diagnosis  # M or B
list = ["Unnamed: 32", "id", "diagnosis"]
x = data.drop(list, axis=1)
x.head()
fig = px.histogram(y, x="diagnosis", color="diagnosis", width=700, height=500)
fig.show()
# Okay, now we have our features, but what do they mean, or how much do we actually need to know about them? Variance, standard deviation, number of samples (count), or max/min values. This kind of information helps us understand what is going on in the data. For example, the question came to my mind that the max value of the area_mean feature is 2500 while the max of the smoothness_mean feature is 0.16340. So do we need standardization or normalization before visualization, feature selection, feature extraction, or classification? The answer is, unsurprisingly, yes and no.
# Anyway, let's go step by step and start with visualization.
x.describe()
# visualization
# To visualize the data, we will use seaborn plots that are not used in other kernels, both to inform you and for variety in the plots. What I mostly use in real life are violin plots and swarm plots. Remember, we are not selecting features; we are trying to learn the data, like looking at the drink list at the bar door.
# Before the violin and swarm plots we need normalization or standardization, because the differences between the feature values are too high to observe on the plot. I plot the features in 3 groups, with 10 features in each group so that we can observe them better.
df = px.data.tips()
fig = px.violin(
    df,
    y="tip",
    color="sex",
    violinmode="overlay",  # draw violins on top of each other
    # default violinmode is 'group' as in example above
    hover_data=df.columns,
)
fig.show()
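# Putting the normalization point above into practice on the actual features:
# standardize x and draw the first group of 10 features as violins. A minimal
# sketch using the x and y objects defined above:
data_std = (x - x.mean()) / x.std()  # z-score standardization
group1 = pd.concat([y, data_std.iloc[:, 0:10]], axis=1)
group1 = pd.melt(group1, id_vars="diagnosis", var_name="features", value_name="value")
plt.figure(figsize=(10, 10))
sns.violinplot(
    x="features", y="value", hue="diagnosis", data=group1, split=True, inner="quart"
)
plt.xticks(rotation=90)
plt.show()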
false
0
1,914
0
2,440
1,914
129053332
<jupyter_start><jupyter_text>Netflix Movies and TV Shows
## Netflix - TV Shows and Movies
This dataset was created to list all shows and movies available on Netflix. It was collected from [JustWatch](https://www.justwatch.com/us) in March 2023, containing data available in the United States.
## Content
This dataset contains two files, one for the titles (**titles.csv**) and the other for the cast (**credits.csv**) of each movie and show on the platform.
The **titles.csv** contains more than **6k** **titles**, with 15 columns containing:
- id: The title ID on JustWatch.
- title: The name of the title.
- show_type: TV show or movie.
- description: A brief description.
- release_year: The release year.
- age_certification: The age certification.
- runtime: The length of the episode (SHOW) or movie.
- genres: A list of genres.
- production_countries: A list of countries that produced the title.
- seasons: Number of seasons if it's a SHOW.
- imdb_id: The title ID on IMDB.
- imdb_score: Score on IMDB.
- imdb_votes: Votes on IMDB.
- tmdb_popularity: Popularity on TMDB.
- tmdb_score: Score on TMDB.
The **credits.csv** contains over **81k** credits of **actors and directors**, with 5 columns containing:
- person_ID: The person ID on JustWatch.
- id: The title ID on JustWatch.
- name: The actor or director's name.
- character_name: The character name.
- role: ACTOR or DIRECTOR.
## Other streaming platforms datasets:
- [Paramount+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/paramount-movies-and-tv-shows)
- [Disney+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/disney-movies-and-tv-shows)
- [AppleTV+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/apple-tv-movies-and-tv-shows)
- [Amazon Prime Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/amazon-prime-movies-and-tv-shows)
- [HBO Max Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/hbo-max-movies-and-tv-shows)
Kaggle dataset identifier: netflix-movies-and-tv-shows
<jupyter_script># # NETFLIX and IMDB
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error

# ## Data Cleaning
netflix = pd.read_csv("/kaggle/input/netflix-movies-and-tv-shows/titles.csv")
netflix.head(5)
netflix.info()
netflix.isna().sum()
# remove stray characters in the 'genres' and 'production_countries' columns
chars_to_remove = ["'", "[", "]"]
cols_to_clean = ["genres", "production_countries"]
for char in chars_to_remove:
    for col in cols_to_clean:
        netflix[col] = netflix[col].apply(lambda x: x.replace(char, ""))
# many movies/shows have more than one genre: keep only the first listed genre
for i in netflix["genres"]:
    newstring = ""
    for char in i:
        if char != ",":
            newstring = newstring + char
        elif char == ",":
            break
    netflix["genres"] = netflix["genres"].replace(i, newstring)
netflix
# drop rows with a missing imdb score or age certification
netflix = netflix[
    (~netflix["imdb_score"].isnull()) & (~netflix["age_certification"].isnull())
]
# fill NaN values in seasons column with 1
netflix["seasons"] = netflix["seasons"].fillna(1)
# remove id column
netflix = netflix.iloc[:, 1:]
netflix.head(5)
# ## Exploratory Data Analysis
types =
netflix["type"].value_counts() print(types) seasons = netflix[netflix["seasons"] > 1] seasons_movie = seasons[seasons["type"] != "SHOW"] print("The number of movie that have more than one season. :", len(seasons_movie)) max_year = netflix["release_year"].max() min_year = netflix["release_year"].min() print("Range of releasing year : ", [min_year, max_year]) age_cer = netflix["age_certification"].value_counts() print("Age certification categories :") print(age_cer) age_genre = netflix.groupby("genres")["age_certification"].value_counts(normalize=True) age_genre.unstack() # from previous table, we found there are movies which have no genre. netflix["genres"] = netflix["genres"].replace({"": "unknown"}) # check it again age_genre_edit = netflix.groupby("genres")["age_certification"].value_counts() age_genre_edit.unstack() count_release = netflix.groupby("release_year")["type"].value_counts() count_release = count_release.unstack() count_release = count_release.iloc[:-1, :] print(count_release) sns.lineplot(data=count_release, x=count_release.index, y="MOVIE", label="Movie") sns.lineplot(data=count_release, x=count_release.index, y="SHOW", label="Show") plt.xlabel("Years") plt.ylabel("Number of contents") plt.title("Number of released contents, since 1969.") plt.show() # # Introduction # The film and television industry has been a major player in the entertainment world for decades, bringing to life a wide range of stories, genres, and characters that capture the hearts and minds of audiences around the globe. In recent years, the industry has been rapidly evolving with the emergence of new technologies and changing consumer preferences, making it more important than ever to understand what makes a movie or TV show successful. # In this data science project, we will be analyzing a dataset that includes information on various movies and TV shows from different countries and genres. The dataset includes features such as release year, age certification, runtime, genres, production countries, seasons, IMDB scores, IMDB votes, tmdb popularity, and tmdb score. We will be exploring this data to uncover interesting patterns and relationships between these variables, as well as to develop hypotheses that we can test using machine learning algorithms. # On the one hand, The tmdb score is a rating provided by the website The Movie Database (TMDb). It is calculated based on the number of votes a movie or TV show receives, as well as its popularity among TMDb users. The score ranges from 0 to 10, with higher scores indicating a higher level of popularity. # The tmdb popularity score is a measure of how much interest a particular title is generating among TMDb users at a given time. It is calculated based on factors such as page views, user ratings, and user favorites, and is intended to provide a snapshot of a title's current popularity. # On the other hand, the IMDB score is a rating provided by the website the Internet Movie Database (IMDB). It is calculated based on the weighted average of ratings given by registered IMDB users. The score ranges from 0 to 10, with higher scores indicating a higher level of quality. # While both scores are useful in evaluating the popularity and quality of movies and TV shows, they are based on different data sources and calculation methods, and may therefore provide different insights into the success of a particular title. It's important to consider both scores, as well as other variables, when evaluating a movie or TV show. 
# Our goal is to gain insights into what factors contribute to the success of a movie or TV show, and to build predictive models that can help stakeholders in the film and television industry make more informed decisions about which projects to invest in. Through our analysis, we hope to provide valuable insights that can be used to improve the quality of content produced in the industry, and to better meet the needs and preferences of audiences around the world.
# ## Objective
# ### Null Hypotheses
# 1. There is no significant difference in IMDB score between action, drama, comedy, and thriller movies.
# 2. The release year of a movie has no significant impact on its IMDB score.
# 3. There is no significant correlation between the IMDB and TMDb scores of US movies.
# hypothesis 1 : There is no significant difference in IMDB score between action, drama, comedy, and thriller movies.
genres_imdb = pd.pivot_table(
    data=netflix,
    index="genres",
    columns="type",
    values="imdb_score",
    aggfunc=[np.median, np.count_nonzero],
)
print(genres_imdb)
sns.pointplot(
    data=netflix[
        (netflix["type"] == "MOVIE")
        & (netflix["genres"].isin(["action", "drama", "comedy", "thriller"]))
    ],
    x="genres",
    y="imdb_score",
    ci=95,
    estimator="median",
)
plt.xticks(rotation=45)
plt.title("Difference of median IMDB score.")
plt.xlabel("Genres")
plt.ylabel("IMDB score")
plt.show()
sns.boxplot(
    data=netflix[
        (netflix["type"] == "MOVIE")
        & (netflix["genres"].isin(["action", "drama", "comedy", "thriller"]))
    ],
    x="genres",
    y="imdb_score",
    whis=[0, 100],
)
plt.xlabel("Genres")
plt.ylabel("IMDB score")
plt.title("IMDB score of movies in each genre category.")
plt.show()
movies = netflix[netflix["type"] == "MOVIE"]
action = movies[movies["genres"] == "action"]["imdb_score"]
drama = movies[movies["genres"] == "drama"]["imdb_score"]
comedy = movies[movies["genres"] == "comedy"]["imdb_score"]
thriller = movies[movies["genres"] == "thriller"]["imdb_score"]
print(
    "Test for normal distribution of action, drama, comedy, and thriller, respectively."
)
for df in [action, drama, comedy, thriller]:
    stat, pvalue = stats.shapiro(df)
    print("P-value : ", pvalue)
print()
print(
    "After examining the box plot, it appears that all genres have normally distributed IMDB scores."
)
print(
    "However, after conducting an assumption test, it was found that only the IMDB score for the thriller genre follows a normal distribution."
)
print()
stat, pvalue = stats.kruskal(action, drama, comedy, thriller)
if pvalue < 0.05:
    print("There is a significant difference in IMDB score between genres.")
else:
    print("There is no significant difference in IMDB score between genres.")
print("pvalue : ", pvalue)
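# The Kruskal-Wallis test only tells us that at least one genre differs; a hedged
# follow-up (not part of the original analysis) is a pairwise Mann-Whitney U
# comparison with a Bonferroni-corrected alpha to see which pairs drive the result:
from itertools import combinations

genre_scores = {"action": action, "drama": drama, "comedy": comedy, "thriller": thriller}
pairs = list(combinations(genre_scores, 2))
adj_alpha = 0.05 / len(pairs)  # Bonferroni correction over the 6 pairwise tests
for g1, g2 in pairs:
    _, p = stats.mannwhitneyu(genre_scores[g1], genre_scores[g2])
    verdict = "significant" if p < adj_alpha else "not significant"
    print(f"{g1} vs {g2}: p = {p:.4f} ({verdict} at adjusted alpha {adj_alpha:.4f})")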
# hypothesis 2 : The release year of a movie has no significant impact on its IMDB score.
number_movie = netflix["release_year"].value_counts(normalize=True)
number_movie = number_movie.reset_index()
number_movie = number_movie.rename(
    columns={"index": "release_year", "release_year": "proportion"}
)
print(number_movie.head(10))
# Compare movies released before 2020 with those released in or after 2020
describe_imdb_before20 = netflix[
    (netflix["release_year"] < 2020) & (netflix["type"] == "MOVIE")
]["imdb_score"].describe()
describe_imdb_after20 = netflix[
    (netflix["release_year"] >= 2020) & (netflix["type"] == "MOVIE")
]["imdb_score"].describe()
describe_imdb_before20 = describe_imdb_before20.reset_index()
describe_imdb_after20 = describe_imdb_after20.reset_index()
print()
describe_imdb = pd.merge_ordered(
    describe_imdb_before20,
    describe_imdb_after20,
    left_on="index",
    right_on="index",
    how="inner",
    suffixes=(" before2020", " after2020"),
)
print("Descriptive statistics of IMDB score in each release-year category.")
print(describe_imdb)
range_year = []
for index, rows in netflix.iterrows():
    if rows["release_year"] < 2020:
        range_year.append("before2020")
    else:
        range_year.append("after2020")
range_year = np.array(range_year)
netflix["range_year"] = range_year
movies = netflix[netflix["type"] == "MOVIE"]
# normality (assumption) test to choose an appropriate measure of central tendency
stat, pvalue_before2020 = stats.shapiro(
    movies[movies["range_year"] == "before2020"]["imdb_score"]
)
stat, pvalue_after2020 = stats.shapiro(
    movies[movies["range_year"] == "after2020"]["imdb_score"]
)
print(
    "p-value of assumption test for IMDB score of movies released before 2020 : ",
    pvalue_before2020,
)
print(
    "p-value of assumption test for IMDB score of movies released after 2020 : ",
    pvalue_after2020,
)
sns.kdeplot(data=movies, x="imdb_score", hue="range_year", alpha=0.2, fill=True)
plt.axvline(
    movies[movies["range_year"] == "before2020"]["imdb_score"].median(),
    linestyle="--",
    color="navy",
)
plt.axvline(
    movies[movies["range_year"] == "after2020"]["imdb_score"].median(),
    linestyle="--",
    color="orange",
)
plt.text(
    movies[movies["range_year"] == "before2020"]["imdb_score"].median() + 0.2,
    0.15,
    s="median IMDB score before 2020",
    color="navy",
)
plt.text(
    movies[movies["range_year"] == "after2020"]["imdb_score"].median() + 0.2,
    0.10,
    s="median IMDB score after 2020",
    color="orange",
)
plt.show()
# hypothesis testing
imdb_movies_before2020 = movies[movies["range_year"] == "before2020"]["imdb_score"]
imdb_movies_after2020 = movies[movies["range_year"] == "after2020"]["imdb_score"]
stat, pvalue_year = stats.mannwhitneyu(imdb_movies_before2020, imdb_movies_after2020)


class hypothesis:
    def test(self, group1, group2, tailed="two-tailed", alpha=0.05, CI=0.95):
        # test for normality
        self.stat1, self.pvalue1 = stats.shapiro(group1)
        self.stat2, self.pvalue2 = stats.shapiro(group2)
        # calculate z alpha: the two-tailed z at alpha 0.05 equals the one-tailed z
        # at alpha 0.025; at a 95% CI, z = +-1.96
        if tailed == "two-tailed":
            self.z_alpha = stats.norm.ppf(1 - (alpha / 2))
        elif tailed == "one-tailed":
            self.z_alpha = stats.norm.ppf(1 - alpha)
        # calculate the difference in location and the p-value: a t-test on the means
        # if both groups look normal, otherwise Mann-Whitney U on the medians
        if self.pvalue1 >= 0.05 and self.pvalue2 >= 0.05:
            self.meandiff = np.mean(group1) - np.mean(group2)
            self.stat, self.pvalue_year = stats.ttest_ind(group1, group2)
        else:
            self.meandiff = np.median(group1) - np.median(group2)
            self.stat, self.pvalue_year = stats.mannwhitneyu(group1, group2)
        # calculate the confidence interval:
        # there is a 95% chance that the difference lies within +-1.96 standard errors
        self.std_error = np.sqrt(
            (np.var(group1) / len(group1)) + (np.var(group2) / len(group2))
        )
        self.ci_low = self.meandiff - (self.z_alpha * self.std_error)
        self.ci_high = self.meandiff + (self.z_alpha * self.std_error)
        self.ci = (self.ci_low, self.ci_high)
        return self.meandiff, self.ci, self.pvalue_year


study = hypothesis()
meandiff, CI, pvalue = study.test(imdb_movies_before2020, imdb_movies_after2020)
print(
    "There is a significant difference in IMDB score between movies released before 2020 and after 2020."
)
print("Mean/median difference : ", meandiff, " 95% confidence interval ", CI)
print("P-value : ", pvalue)
# hypothesis 3 : There is no significant correlation between the IMDB and TMDb scores of US movies.
MOVIE_US = netflix[
    (netflix["type"] == "MOVIE") & (netflix["production_countries"] == "US")
]
sns.jointplot(
    data=MOVIE_US,
    x="imdb_score",
    y="tmdb_score",
    kind="reg",
    order=1,
    scatter_kws={"alpha": 0.5},
)
plt.show()
corr, pvalue = stats.pearsonr(MOVIE_US["imdb_score"], MOVIE_US["tmdb_score"])
print(
    "There is a significant positive correlation between the IMDB and TMDb scores of US movies."
)
print("Correlation coefficient : ", corr)
print("P-value : ", pvalue)
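# Robustness check (a sketch, not part of the original analysis): Pearson's r
# assumes a linear relationship, while Spearman's rank correlation does not,
# so agreement between the two lends support to the conclusion above
us_scores = MOVIE_US[["imdb_score", "tmdb_score"]].dropna()
corr_s, pvalue_s = stats.spearmanr(us_scores["imdb_score"], us_scores["tmdb_score"])
print("Spearman correlation coefficient : ", corr_s)
print("P-value : ", pvalue_s)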
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/053/129053332.ipynb
netflix-movies-and-tv-shows
dgoenrique
[{"Id": 129053332, "ScriptId": 38362763, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13032399, "CreationDate": "05/10/2023 15:40:41", "VersionNumber": 1.0, "Title": "Netflix and IMDB", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 245.0, "LinesInsertedFromPrevious": 245.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184770811, "KernelVersionId": 129053332, "SourceDatasetVersionId": 5160035}]
[{"Id": 5160035, "DatasetId": 2998553, "DatasourceVersionId": 5231847, "CreatorUserId": 8005566, "LicenseName": "CC0: Public Domain", "CreationDate": "03/13/2023 18:49:00", "VersionNumber": 1.0, "Title": "Netflix Movies and TV Shows", "Slug": "netflix-movies-and-tv-shows", "Subtitle": "Movies and TV Shows listings on Netflix (March, 2023)", "Description": "## Netflix - TV Shows and Movies\n\nThis dataset was created to list all shows and movies available on Netflix. It was collected from [JustWatch](https://www.justwatch.com/us) in March 2023, containing data available in the United States.\n\n\n## Content\n\nThis dataset contains two files, one for the titles (**titles.csv**) and the other for the cast (**credits.csv**) of each movie and show on the platform.\n\nThe **titles.csv** contains more than **6k** **titles**, witch 15 columns containing:\n\n- id: The title ID on JustWatch.\n- title: The name of the title.\n- show_type: TV show or movie.\n- description: A brief description.\n- release_year: The release year.\n- age_certification: The age certification.\n- runtime: The length of the episode (SHOW) or movie.\n- genres: A list of genres.\n- production_countries: A list of countries that - produced the title.\n- seasons: Number of seasons if it's a SHOW.\n- imdb_id: The title ID on IMDB.\n- imdb_score: Score on IMDB.\n- imdb_votes: Votes on IMDB.\n- tmdb_popularity: Popularity on TMDB.\n- tmdb_score: Score on TMDB.\n\nThe **credits.csv** contains over **81k** credits of **actors and directors**, with 5 columns containing:\n\n- person_ID: The person ID on JustWatch.\n- id: The title ID on JustWatch.\n- name: The actor or director's name.\n- character_name: The character name.\n- role: ACTOR or DIRECTOR.\n\n## Other streaming platforms datasets::\n\n- [Paramount+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/paramount-movies-and-tv-shows) \n- [Disney+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/disney-movies-and-tv-shows)\n- [AppleTV+ Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/apple-tv-movies-and-tv-shows)\n- [Amazon Prime Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/amazon-prime-movies-and-tv-shows)\n- [HBO Max Movies and TV Shows](https://www.kaggle.com/datasets/dgoenrique/hbo-max-movies-and-tv-shows)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2998553, "CreatorUserId": 8005566, "OwnerUserId": 8005566.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5160035.0, "CurrentDatasourceVersionId": 5231847.0, "ForumId": 3037339, "Type": 2, "CreationDate": "03/13/2023 18:49:00", "LastActivityDate": "03/13/2023", "TotalViews": 26767, "TotalDownloads": 5176, "TotalVotes": 111, "TotalKernels": 4}]
[{"Id": 8005566, "UserName": "dgoenrique", "DisplayName": "Diego Enrique", "RegisterDate": "07/29/2021", "PerformanceTier": 2}]
false
1
3,889
0
4,527
3,889
129024934
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from numpy.random import randn

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Series
labels = ["a", "b", "c"]
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {"a": 10, "b": 20, "c": 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=labels)
pd.Series(d)
d
ser1 = pd.Series([1, 2, 3, 4], ["USA", "Germany", "USSR", "Japan"])
ser1
ser2 = pd.Series([1, 2, 3, 4], ["USA", "Germany", "Italy", "Japan"])
ser2
ser1["USA"]
# # DataFrames
# seed the generator so the same random numbers are produced on every run
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ["A", "B", "C", "D", "E"], ["W", "X", "Y", "Z"])
df
df["W"]
print(type(df["W"]))
print(type(df))
df[["W", "Z"]]
df["new"] = df["W"] + df["Y"]
df
# axis=0 refers to rows and axis=1 refers to columns
# inplace=True makes sure the change is applied to the original df
df.drop("new", axis=1, inplace=True)
df
df.drop("E", axis=0, inplace=True)
df
# Selecting ROWS
# Same as df.iloc[0]
df.loc["A"]
# Same as df.loc['B']
df.iloc[1]
df.loc["B", "Y"]
df.loc[["A", "B"], ["W", "Y"]]
booldf = df > 0
booldf
df["W"]
df["W"] > 0
# Get the rows of df where W > 0, then show the X column
df[df["W"] > 0]["X"]
states = "CA NY WY OR".split()
states
df["States"] = states
df
df.set_index("States")
df
# Index Levels - Hierarchy
outside = ["G1", "G1", "G1", "G2", "G2", "G2"]
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
dfnew = pd.DataFrame(randn(6, 2), hier_index, ["A", "B"])
dfnew
dfnew.loc["G1"]
dfnew.loc["G1"].loc[2]
dfnew.loc["G1"].loc[2]["B"]
dfnew
dfnew.xs("G1")
dfnew.index.names = ["Groups", "Num"]
dfnew
dfnew.xs(1, level="Num")
# # Missing Data
df3 = {"A": [1, 2, np.nan], "B": [5, np.nan, np.nan], "C": [1, 2, 3]}
df3 = pd.DataFrame(df3)
df3
# drop columns containing a null value (axis=1)
df3.dropna(axis=1)
# drop rows containing a null value (axis=0 by default)
df3.dropna()
# thresh=2 keeps only rows with at least 2 non-null values
df3.dropna(thresh=2)
df3
df3.fillna(value="X")
# Fill null values in column A with the mean of the values in column A
df3["A"].fillna(value=df3["A"].mean())
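# A natural extension of the fill above (a sketch): fill every numeric column's
# missing values with that column's own mean in a single call — fillna aligns the
# Series of per-column means with the frame's columns
df3.fillna(df3.mean(numeric_only=True))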
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024934.ipynb
null
null
[{"Id": 129024934, "ScriptId": 38333111, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14559352, "CreationDate": "05/10/2023 11:52:00", "VersionNumber": 3.0, "Title": "Pandas Examples", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 160.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 84.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,072
0
1,072
1,072
129024960
<jupyter_start><jupyter_text>Room Occupancy Estimation Data Set

Data Set Information:

The experimental testbed for occupancy estimation was deployed in a 6m × 4.6m room. The setup consisted of 7 sensor nodes and one edge node in a star configuration with the sensor nodes transmitting data to the edge every 30s using wireless transceivers. No HVAC systems were in use while the dataset was being collected.

Attribute Information:

Date: YYYY/MM/DD
Time: HH:MM:SS
Temperature: In degree Celsius
Light: In Lux
Sound: In Volts (amplifier output read by ADC)
CO2: In PPM
CO2 Slope: Slope of CO2 values taken in a sliding window
PIR: Binary value conveying motion detection
Room_Occupancy_Count: Ground Truth

Kaggle dataset identifier: room-occupancy-estimation-data-set
<jupyter_script># #
# # Table of Contents
# ### 1. [Problem Statement](#1)
# ### 2. [Data Set Information](#2)
# ### 3. [Exploratory Data Analysis and Visualization](#3)
# ### 4. [Variable Selection](#4)
# ### 5. [HyperParameter Tuning and Model Selection](#5)
#
# Back to top
# ### $$\mathbf{\text{Part 1. Problem Statement:}}$$
# Estimating the precise number of occupants in a room using multiple non-intrusive environmental sensors like temperature, light, sound, CO2 and PIR.
# Back to top
#
# ## Part 2. Data Set Information:
# The experimental testbed for occupancy estimation was deployed in a 6m x 4.6m room. In this setup, there were 7 sensor nodes and one edge node arranged in a star configuration, with the sensor nodes sending data to the edge every 30 seconds. No HVAC systems (Heating, Ventilation and Air-Conditioning) were in use while the dataset was being collected.
# Five different types of non-intrusive sensors were used in this experiment: temperature, light, sound, CO2 and digital passive infrared (PIR). Sensor nodes S1-S4 consisted of temperature, light and sound sensors, S5 had a CO2 sensor, and S6 and S7 each had one PIR sensor; these were deployed on the ceiling ledges at an angle that maximized the sensors' field of view for motion detection.
# The data was collected for a period of 4 days in a controlled manner with the occupancy in the room varying between 0 and 3 people. The ground truth of the occupancy count in the room was noted manually.
# ## Attribute Information:
# #### Categorical Variables:
# - Date: YYYY/MM/DD
# - Time: HH:MM:SS
# - Room_Occupancy_Count: Ground Truth
# #### Numerical Variables:
# - Temperature: In degree Celsius
# - Light: In Lux (*Lux is a measure of how much light falls on a particular surface*)
# - Sound: In Volts (amplifier output read by ADC)
# - CO2: In PPM
# - CO2 Slope: Slope of CO2 values taken in a sliding window
# - PIR: Binary value conveying motion detection
# Importing relevant modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from matplotlib.patches import Patch
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import warnings

warnings.filterwarnings("ignore")
df = pd.read_csv("room_occupancy_estimation_dataset.csv")
df.head()
df.shape
df["Room_Occupancy_Count"].value_counts().plot(kind="pie")
# In this dataset, there are 10129 records with 18 input variables and one output variable, "Room_Occupancy_Count".
# Back to top
#
# ## Part 3. Exploratory data analysis (EDA) and Visualization :
# ## EDA:
# #### 1. Checking for duplicates in the dataset
df[df.duplicated()]
# There are no duplicate rows in our dataset.
df.drop(columns=["Date", "Time"], axis=1, inplace=True)
df[df.duplicated()]
# After dropping the 'Date' and 'Time' columns, we notice a few duplicate rows that were not flagged before. These rows need not be dropped, as they are independent readings rather than true duplicates.
# #### 2. Checking for null values in the dataset
df.info()
# We can see that there are no missing values in the dataset.
# #### 3. Visualizing and treating outliers
df.describe()
# From the summary, we can see that a few columns in the dataset have a high standard deviation. We know that outliers easily influence the mean. This becomes more evident when you notice that the mean is small compared to the max value, suggesting that there are some outliers. We can visualize these attributes and decide how to treat them.
# We can use a violin plot to look for outliers in the numerical attributes. It lets us visualize the distribution of each variable and identify its density (peak) at the same time. By default, boxplot() would plot the same y-axis for all the selected columns, which is not feasible when the columns are on different scales and would lead to improper analysis. To avoid this, we identify univariate outliers with per-variable violin plots, complemented by the numeric check below.
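# A hedged numeric cross-check of the outlier picture (a sketch; `iqr_outlier_counts`
# is a hypothetical helper, and the 1.5*IQR fence is the same rule a box plot uses,
# applied here to the columns plotted next):
def iqr_outlier_counts(frame):
    # count values falling outside the [Q1 - 1.5*IQR, Q3 + 1.5*IQR] fences, per column
    q1, q3 = frame.quantile(0.25), frame.quantile(0.75)
    iqr = q3 - q1
    return (frame.lt(q1 - 1.5 * iqr) | frame.gt(q3 + 1.5 * iqr)).sum()


print(iqr_outlier_counts(df[["S1_Light", "S2_Light", "S3_Light", "S4_Light", "S5_CO2"]]))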
# Setting output to have 5 subplots in a single row
fig, ax = plt.subplots(1, 5, figsize=(10, 5))
# To tune the spacing between the subplots
plt.subplots_adjust(wspace=0.5)
# Drawing a violin plot for S1_Light in the 1st subplot
sns.violinplot(data=df["S1_Light"], ax=ax[0], color="brown")
ax[0].set_xlabel("S1_Light")
# Drawing a violin plot for S2_Light in the 2nd subplot
sns.violinplot(data=df["S2_Light"], ax=ax[1], color="g")
ax[1].set_xlabel("S2_Light")
# Drawing a violin plot for S3_Light in the 3rd subplot
sns.violinplot(data=df["S3_Light"], ax=ax[2])
ax[2].set_xlabel("S3_Light")
# Drawing a violin plot for S4_Light in the 4th subplot
sns.violinplot(data=df["S4_Light"], ax=ax[3], color="y")
ax[3].set_xlabel("S4_Light")
# Drawing a violin plot for S5_CO2 in the 5th subplot
sns.violinplot(data=df["S5_CO2"], ax=ax[4], color="b")
ax[4].set_xlabel("S5_CO2")
# by default, you'll see an x-tick label set to 0 in each subplot;
# remove it by setting it to an empty list
for subplot in ax:
    subplot.set_xticklabels([])
plt.show()
# ##### Light:
# There appear to be a lot of outliers for all light (lux) variables. From the violin plots, we can see that most of the density is around 0 for these variables. This suggests that there is no light source during those times. Typically, indoor light levels range between 100 and 300 lux for normal activities, which means that values above the 75th percentile are significant. Excluding these values solely based on their extremeness would lead to the loss of valuable information.
# #### CO2:
# Here are the different CO2 levels which are considered typical and dangerous:
# - 400ppm – Normal outdoor air level.
# - 400 ~ 1,100ppm – Typical level indoors with good ventilation. If CO2 levels are low when the building is sealed and occupied, check for over-ventilation (too much fresh air = energy wasted).
# - 1,100ppm – The OSHA/ASHRAE recommended maximum level in a closed room.
# - '> 1,200ppm' – Poor air quality – requires ventilation to the room.
# - 2,000ppm – According to many studies, this level of CO2 produces a significant increase in drowsiness, tiredness, headaches, lower levels of concentration, and an increased likelihood of spreading respiratory viruses like colds, etc.
# From this we can conclude that the extreme values of the CO2 level add value to the dataset. Hence, we do not need to exclude these values either.
# #### 4. Data Reduction
df["S5_CO2_Slope"] = np.round(df["S5_CO2_Slope"], decimals=2)
df.head()
# ## Visualization Analysis
# ### 1. Pairplot
sns.pairplot(data=df, hue="Room_Occupancy_Count")
# The pairplot shows the pairwise relationships between variables in the dataset along with each variable's distribution. From these plots we can see that the dataset is largely linearly separable. We can further check this with the help of a correlation matrix.
# ### 2. Box-and-whisker Plot
# This type of plot is commonly used to visualize relationships between numerical variables and categorical variables.
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15))
for idx, feat in enumerate(df.columns.to_list(), start=0):
    ax = axes[int(idx / 4), idx % 4]
    sns.boxplot(x="Room_Occupancy_Count", y=feat, data=df, ax=ax)
    ax.set_xlabel("")
    ax.set_ylabel(feat)
fig.tight_layout()
# Insights (backed numerically by the group summary below):
# - The temperature increases when there are people present in the room.
# - All light variables have higher readings when the room is occupied. This should be an effective variable for estimating occupancy in a room.
# - The sound variable seems to vary with the number of occupants in the room but is generally higher with more than one occupant.
# - The CO2 reading seems higher when there is no occupant. At first glance, these look like outliers. The abnormalities may be caused by plants or CO2-emitting equipment inside the room; poor ventilation could also contribute to the problem.
# - As expected, motion is detected when occupants are in the room. This can also be a good variable for estimating room occupancy.
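# A hedged numeric companion to the insights above (a sketch): the mean of each
# sensor reading at each occupancy level makes the same patterns visible in one table
print(df.groupby("Room_Occupancy_Count").mean().round(2))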
# Back to top
#
# ## Part 4. Variable Selection :
# ### 1. Lasso Regression
#
X = df.drop(["Room_Occupancy_Count"], axis=1)
y = df[["Room_Occupancy_Count"]]
# Scaling the dataset
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
lasso_reg = Lasso()
lasso_reg.fit(X_scaled, y)
# define grid search
grid = {"alpha": [0.0001, 0.001, 0.01, 0.1, 1]}
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42)
grid_search_lasso = GridSearchCV(
    estimator=lasso_reg,
    param_grid=grid,
    n_jobs=-1,
    cv=cv,
    scoring="neg_mean_squared_error",  # Lasso is a regressor, so a regression metric is used here
    error_score=0,
)
# Results
grid_result_lasso = grid_search_lasso.fit(X_scaled, y)
grid_result_lasso.best_params_
coef = grid_result_lasso.best_estimator_.coef_
coef
X.columns[coef == 0]
# An empty list shows that we could not eliminate any variables. It can also be inferred that, according to lasso regression, all the variables are relevant to some extent.
# ### 2. Correlation Matrix
# The correlation matrix will help us find the relationship between all possible pairs of variables in the dataset. Based on the correlation coefficients, we can set a threshold and select the variables whose coefficients are greater than it.
# Creating a correlation matrix
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(10, 6))
sns.heatmap(
    corr_matrix,
    cmap=sns.diverging_palette(220, 10, as_cmap=True),
    annot=True,
    annot_kws={"fontsize": 7},
)
plt.xticks(rotation=45, ha="right", fontsize=7)
plt.yticks(fontsize=7)
plt.show()
df.shape
# Creating a function to return all variables that have high correlation coefficients
def get_correlated_variables(dataset, threshold):
    corr_columns = set()  # To get unique correlated column names
    corr_matrix = dataset.corr()
    for i in range(len(corr_matrix.columns)):
        for j in range(i):
            if (
                abs(corr_matrix.iloc[i][j]) > threshold
            ):  # Checking the absolute value of the corr coeff
                column_name = corr_matrix.columns[i]  # Getting the name of the column
                corr_columns.add(column_name)
    return corr_columns


corr_features = get_correlated_variables(X, 0.8)
corr_features
# All 3 models were run on variables selected through the correlation matrix, but they did not give good enough results. This could be because the correlation matrix measures the relationship between pairs of variables; it does not consider the impact they have on a target variable when other variables are included in the model. Because of this, a few variables may appear redundant in a correlation matrix but still provide crucial information about the target variable.
# Next, we try a wrapper method for variable selection.
# ### 3. Bi-Directional Elimination as a wrapper method
# Wrapper method: This is a method of variable selection that trains and evaluates a machine learning algorithm for different subsets of features and determines the subset of features that yields the highest performance.
# Here we use bi-directional elimination as a wrapper method on the "Random Forest Classifier".
This wrapper method combines both forward selection and backward elimination methods. While adding a new feature, it checks the significance of already added features. If it finds any of the already selected features to be insignificant, it simply removes that particular feature through backward elimination. from mlxtend.feature_selection import SequentialFeatureSelector as SFS # Bi-directional Sequential Selection(sffs) sffs = SFS( RandomForestClassifier(), k_features=(1, len(X.columns)), forward=True, floating=True, scoring="accuracy", cv=5, ) sffs.fit(X, y) corr_features = list(sffs.k_feature_names_) corr_features X_final = X[corr_features] X_final.head() X_final.shape # Back to top # # ## Part 5. HyperParameter Tuning and Model Selection : # Splitting the dataset into train and test X_train, X_test, y_train, y_test = train_test_split( X_final, y, test_size=0.25, random_state=42 ) # Scaling the dataset scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.transform(X_test) # ### 1. Linear Classifier sgd_model = SGDClassifier(random_state=42) sgd_model.fit(X_train_scaled, y_train) print("Accuracy on train set: ", sgd_model.score(X_train_scaled, y_train)) print("Accuracy on test set: ", sgd_model.score(X_test_scaled, y_test)) # Defining hyperparameters loss = ["hinge", "log", "squared_hinge", "modified_huber"] penalty = ["l1", "l2", "elasticnet"] alpha = [0.0001, 0.001, 0.01, 0.1] # define grid search grid = dict(loss=loss, penalty=penalty, alpha=alpha) cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42) grid_search = GridSearchCV( estimator=sgd_model, param_grid=grid, n_jobs=-1, cv=cv, scoring="accuracy", error_score=0, ) # Results grid_result = grid_search.fit(X_train_scaled, y_train) # summarize results print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) sgd_model = SGDClassifier( alpha=grid_result.best_params_["alpha"], loss=grid_result.best_params_["loss"], penalty=grid_result.best_params_["penalty"], random_state=42, ) sgd_model.fit(X_train_scaled, y_train) print( "Accuracy on test set after hyperparameter tuning: ", sgd_model.score(X_test_scaled, y_test), ) y_predict = sgd_model.predict(X_test_scaled) cm = confusion_matrix(y_test, y_predict) plt.figure(figsize=(6, 4)) sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g") plt.xlabel("Predicted", fontsize=10) plt.ylabel("Actual/Observed", fontsize=10) print(classification_report(y_test, y_predict)) # Calculating the evaluation metrics accuracy = accuracy_score(y_test, y_predict) weighted_precision = precision_score(y_test, y_predict, average="weighted") weighted_recall = recall_score(y_test, y_predict, average="weighted") weighted_f1 = f1_score(y_test, y_predict, average="weighted") # Creating an evaluation metrics dataframe for the model results = pd.DataFrame( [ [ "Linear Classifier", round(accuracy * 100, 2), round(weighted_precision * 100, 2), round(weighted_recall * 100, 2), round(weighted_f1 * 100, 2), ] ], columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"], ) results # ### 2. 
KNN Classifier knn_model = KNeighborsClassifier(n_neighbors=7) knn_model.fit(X_train_scaled, y_train) print("Accuracy on train set: ", knn_model.score(X_train_scaled, y_train)) print("Accuracy on test set: ", knn_model.score(X_test_scaled, y_test)) # define grid search grid = {"n_neighbors": range(1, 20), "weights": ["uniform", "distance"], "p": [1, 2]} cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1) grid_search = GridSearchCV( estimator=knn_model, param_grid=grid, n_jobs=-1, cv=cv, scoring="accuracy", error_score=0, verbose=1, ) grid_result = grid_search.fit(X_train_scaled, y_train) # Summarize Results print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) knn_model = KNeighborsClassifier( n_neighbors=grid_result.best_params_["n_neighbors"], p=grid_result.best_params_["p"], weights=grid_result.best_params_["weights"], ) knn_model.fit(X_train_scaled, y_train) print("Accuracy on test set: ", knn_model.score(X_test_scaled, y_test)) y_pred = knn_model.predict(X_test_scaled) cm = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g") plt.xlabel("Predicted", fontsize=10) plt.ylabel("Actual/Observed", fontsize=10) print(classification_report(y_test, y_pred)) # Calculating the evaluation metrics accuracy = accuracy_score(y_test, y_pred) weighted_precision = precision_score(y_test, y_pred, average="weighted") weighted_recall = recall_score(y_test, y_pred, average="weighted") weighted_f1 = f1_score(y_test, y_pred, average="weighted") # Creating an evaluation metrics dataframe for the model lr_results = pd.DataFrame( [ [ "KNN Classifier", round(accuracy * 100, 2), round(weighted_precision * 100, 2), round(weighted_recall * 100, 2), round(weighted_f1 * 100, 2), ] ], columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"], ) # Appending the evaluation metrics into a dataframe results = results.append(lr_results, ignore_index=True) results # ### 3. 
Random Forest
rf_model = RandomForestClassifier()
rf_model.fit(X_train_scaled, y_train)
print("Accuracy on train set: ", rf_model.score(X_train_scaled, y_train))
print("Accuracy on test set: ", rf_model.score(X_test_scaled, y_test))
# define grid search
grid = {
    "bootstrap": [True],
    "max_features": ["sqrt", "log2"],
    "max_depth": [10, 20, 30, 40, 50],
    "n_estimators": [100, 200, 300, 400, 500],
}
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
grid_search = GridSearchCV(
    estimator=rf_model,
    param_grid=grid,
    n_jobs=-1,
    cv=cv,
    scoring="accuracy",
    error_score=0,
    verbose=1,
)
# fit the grid search on the training set (not the test set) to avoid leakage
grid_result = grid_search.fit(X_train_scaled, y_train)
# Summarize Results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_["mean_test_score"]
stds = grid_result.cv_results_["std_test_score"]
params = grid_result.cv_results_["params"]
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
rf_model = RandomForestClassifier(
    bootstrap=True,
    max_depth=grid_result.best_params_["max_depth"],
    max_features=grid_result.best_params_["max_features"],
    n_estimators=grid_result.best_params_["n_estimators"],
)
rf_model.fit(X_train_scaled, y_train)
print("Accuracy on test set: ", rf_model.score(X_test_scaled, y_test))
y_pred = rf_model.predict(X_test_scaled)
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(6, 4))
sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g")
plt.xlabel("Predicted", fontsize=10)
plt.ylabel("Actual/Observed", fontsize=10)
print(classification_report(y_test, y_pred))
# Calculating the evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
weighted_precision = precision_score(y_test, y_pred, average="weighted")
weighted_recall = recall_score(y_test, y_pred, average="weighted")
weighted_f1 = f1_score(y_test, y_pred, average="weighted")
# Creating an evaluation metrics dataframe for the model
rf_results = pd.DataFrame(
    [
        [
            "Random Forest",
            round(accuracy * 100, 2),
            round(weighted_precision * 100, 2),
            round(weighted_recall * 100, 2),
            round(weighted_f1 * 100, 2),
        ]
    ],
    columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"],
)
# Appending the evaluation metrics to the results dataframe
results = results.append(rf_results, ignore_index=True)
results
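# A hedged follow-up (not part of the original comparison): since the random forest
# performs well, its feature importances give a quick read on which of the selected
# sensor variables carry the most signal
importances = pd.Series(rf_model.feature_importances_, index=X_train.columns)
importances.sort_values().plot(kind="barh", figsize=(8, 4))
plt.title("Random forest feature importances")
plt.xlabel("Importance")
plt.show()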
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024960.ipynb
room-occupancy-estimation-data-set
ananthr1
[{"Id": 129024960, "ScriptId": 38354562, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3643325, "CreationDate": "05/10/2023 11:52:14", "VersionNumber": 1.0, "Title": "Room Occupancy Estimation with Variable Selection", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 540.0, "LinesInsertedFromPrevious": 540.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
[{"Id": 184716467, "KernelVersionId": 129024960, "SourceDatasetVersionId": 2994353}]
[{"Id": 2994353, "DatasetId": 1834752, "DatasourceVersionId": 3042142, "CreatorUserId": 7825963, "LicenseName": "CC0: Public Domain", "CreationDate": "01/01/2022 17:29:05", "VersionNumber": 1.0, "Title": "Room Occupancy Estimation Data Set", "Slug": "room-occupancy-estimation-data-set", "Subtitle": "Data set for estimating the precise number of occupants in a room", "Description": "Data Set Information:\n\nThe experimental testbed for occupancy estimation was deployed in a 6m \u00c3\u2014 4.6m room. The setup consisted of 7 sensor nodes and one edge node in a star configuration with the sensor nodes transmitting data to the edge every 30s using wireless transceivers. No HVAC systems were in use while the dataset was being collected.\n\n\n\n\nAttribute Information:\n\nDate: YYYY/MM/DD\nTime: HH:MM:SS\nTemperature: In degree Celsius\nLight: In Lux\nSound: In Volts (amplifier output read by ADC)\nCO2: In PPM\nCO2 Slope: Slope of CO2 values taken in a sliding window\nPIR: Binary value conveying motion detection\nRoom_Occupancy_Count: Ground Truth", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1834752, "CreatorUserId": 7825963, "OwnerUserId": 7825963.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2994353.0, "CurrentDatasourceVersionId": 3042142.0, "ForumId": 1857543, "Type": 2, "CreationDate": "01/01/2022 17:29:05", "LastActivityDate": "01/01/2022", "TotalViews": 8856, "TotalDownloads": 495, "TotalVotes": 33, "TotalKernels": 12}]
[{"Id": 7825963, "UserName": "ananthr1", "DisplayName": "ANANTH R", "RegisterDate": "07/03/2021", "PerformanceTier": 2}]
# # # Table of Contents # ### 1. [Problem Statement](#1) # ### 2. [Data Set Information](#2) # ### 3. [Exploratory Data Analysis and Visualization](#3) # ### 4. [Variable Selection](#4) # ### 5. [HyperParameter Tuning and Model Selection](#5) # # Back to top # ### $$\mathbf{\text{Part 1. Problem Statement:}}$$ # Estimating the precise number of occupants in a room using multiple non-intrusive environmental sensors like temperature, light, sound, CO2 and PIR. # Back to top # # ## Part 2. Data Set Information: # The experimental testbed for occupancy estimation was deployed in a 6m x 4.6m room. In this setup, there were 7 sensor nodes and one edge arranged in a star configuration, with the sensor nodes sending data to the edge every 30 seconds. No HVAC systems (Heating, Ventilation and Air-Conditioning) were in use while the dataset was being collected. # Five different types of non-intrusive sensors were used in this experiment: temperature, light, sound, CO2 and digital passive infrared (PIR). Sensor nodes S1-S4 consisted of temperature, light and sound sensors, S5 had a CO2 sensor and S6 and S7 had one PIR sensor each that were deployed on the ceiling ledges at an angle that maximized the sensor's field of view for motion detection. # The data was collected for a period of 4 days in a controlled manner with the occupancy in the room varying between 0 and 3 people. The ground truth of the occupancy count in the room was noted manually. # ## Attribute Information: # #### Categorical Variable: # - Date: YYYY/MM/DD # - Time: HH:MM:SS # - Room_Occupancy_Count: Ground Truth # #### Numerical Variables: # - Temperature: In degree Celsius # - Light: In Lux (*Lux is a measure of how much light falls on a particular surface*) # - Sound: In Volts (amplifier output read by ADC) # - CO2: In PPM # - CO2 Slope: Slope of CO2 values taken in a sliding window # - PIR: Binary value conveying motion detection # # Importing relevant modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from matplotlib.patches import Patch from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.linear_model import Lasso from sklearn.linear_model import SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.metrics import confusion_matrix, accuracy_score, classification_report import warnings warnings.filterwarnings("ignore") df = pd.read_csv("room_occupancy_estimation_dataset.csv") df.head() df.shape df["Room_Occupancy_Count"].value_counts().plot(kind="pie") # In this dataset, there are 10129 records with 18 input variables and one output variable, "Room_Occupancy_Count". # Back to top # # ## Part 3. Exploratory data analysis (EDA) and Visualization : # ## EDA: # #### 1. Checking for duplicates in the dataset df[df.duplicated()] # There are no duplicate rows in our dataset. df.drop(columns=["Date", "Time"], axis=1, inplace=True) df[df.duplicated()] # After dropping the 'Date' and 'Time' columns, we notice that there are a few duplicate values which were not present before. These columns need not be dropped as they are independent readings and not duplicates. # #### 2. 
Checking for null values in the dataset df.info() # We can see that there are no missing values in the dataset. # #### 3. Visualizing and treating outliers df.describe() # From the summary, we can see that a few columns in the dataset have a high standard deviation. We know that outliers easily influence the mean. This becomes more evident when you notice that the mean is small compared to the max value, suggesting that there are some outliers. We can visualize these attributes and decide how to treat them further. # We can use a violin plot to look for outliers in numerical attributes. This makes it possible to visualize the distribution of the dataset and identify the density (peak) of each variable at the same time. By default, boxplot() will plot the same y-axis for all the selected columns, which is not feasible when the columns in question are not on the same scale and leads to improper analysis. To avoid this, we identify univariate outliers using violin plots instead. # Setting output to have 5 subplots in a single row fig, ax = plt.subplots(1, 5, figsize=(10, 5)) # To tune the spacing between the subplots plt.subplots_adjust(wspace=0.5) # Drawing a violin plot for S1_Light in the 1st subplot sns.violinplot(data=df["S1_Light"], ax=ax[0], color="brown") ax[0].set_xlabel("S1_Light") # Drawing a violin plot for S2_Light in the 2nd subplot sns.violinplot(data=df["S2_Light"], ax=ax[1], color="g") ax[1].set_xlabel("S2_Light") # Drawing a violin plot for S3_Light in the 3rd subplot sns.violinplot(data=df["S3_Light"], ax=ax[2]) ax[2].set_xlabel("S3_Light") # Drawing a violin plot for S4_Light in the 4th subplot sns.violinplot(data=df["S4_Light"], ax=ax[3], color="y") ax[3].set_xlabel("S4_Light") # Drawing a violin plot for S5_CO2 in the 5th subplot sns.violinplot(data=df["S5_CO2"], ax=ax[4], color="b") ax[4].set_xlabel("S5_CO2") # by default, you'll see the x-tick label set to 0 in each subplot # remove it by setting it to an empty list for subplot in ax: subplot.set_xticklabels([]) plt.show() # #### Light: # There appear to be a lot of outliers for all light (lux) variables. From the violin plots, we can see that most of the density is around 0 for these variables. This suggests that there is no light source during those times. Typically, indoor light levels range between 100 and 300 lux for normal activities, which means that values above the 75th percentile are significant. Excluding these values solely based on their extremeness would lead to the loss of valuable information. # #### CO2: # Here are the different CO2 levels which are considered typical and dangerous: # - 400ppm – Normal outdoor air level. # - 400 ~ 1,100ppm – Typical level indoors with good ventilation. If CO2 levels are low when the building is sealed and occupied, check for over-ventilation (too much fresh air = energy wasted). # - 1,100ppm – the OSHA/ASHRAE recommended maximum level in a closed room. # - > 1,200ppm – Poor air quality – requires ventilation to the room. # - 2,000ppm – According to many studies this level of CO2 produces a significant increase in drowsiness, tiredness, headaches, lower levels of concentration, and an increased likelihood of spreading respiratory viruses like colds, etc. # #### 4. Data Reduction df["S5_CO2_Slope"] = np.round(df["S5_CO2_Slope"], decimals=2) df.head() # ## Visualization Analysis # ### 1. 
Pairplot sns.pairplot(data=df, hue="Room_Occupancy_Count") # The pairplot shows the pairwise relationships between variables in the dataset along with their distributions. From these plots, we can see that the dataset is largely linearly separable. We can further check this with the help of a correlation matrix. # ### 2. Box-and-whisker Plot # This type of plot is commonly used to visualize relationships between numerical variables and categorical variables. fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(20, 15)) for idx, feat in enumerate(df.columns.to_list(), start=0): ax = axes[int(idx / 4), idx % 4] sns.boxplot(x="Room_Occupancy_Count", y=feat, data=df, ax=ax) ax.set_xlabel("") ax.set_ylabel(feat) fig.tight_layout() # Insights: # - The temperature increases when there are people present in the room. # - All light variables have higher readings when the room is occupied. This should be an effective variable to estimate occupancy in a room. # - The sound variable seems to vary according to the number of occupants in the room but is generally higher with more than one occupant. # - The CO2 reading seems higher when there is no occupant. At first glance, these look like outliers. These abnormalities may be caused by plants or CO2-emitting equipment inside the room. Poor ventilation could also contribute to the problem. # - As expected, motion is detected when occupants are in a room. This can also be a good variable to estimate room occupancy. # Back to top # # ## Part 4. Variable Selection: # ### 1. Lasso Regression # X = df.drop(["Room_Occupancy_Count"], axis=1) y = df[["Room_Occupancy_Count"]] # Scaling the dataset scaler = StandardScaler() X_scaled = scaler.fit_transform(X) lasso_reg = Lasso() lasso_reg.fit(X_scaled, y) # define grid search; Lasso is a regressor, so a regression metric is used for scoring grid = {"alpha": [0.0001, 0.001, 0.01, 0.1, 1]} cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42) grid_search_lasso = GridSearchCV( estimator=lasso_reg, param_grid=grid, n_jobs=-1, cv=cv, scoring="neg_mean_squared_error", error_score=0, ) # Results grid_result_lasso = grid_search_lasso.fit(X_scaled, y) grid_result_lasso.best_params_ coef = grid_result_lasso.best_estimator_.coef_ coef X.columns[coef == 0] # An empty list shows that we could not eliminate any variables. It could also be inferred that all the variables are relevant to a certain extent according to lasso regression. # ### 2. Correlation Matrix # The correlation matrix will help us find the relationship between all the possible pairs of variables in the dataset. Based on the correlation coefficients, we can set a threshold and select the variables whose absolute correlation coefficients exceed it. 
# Creating a correlation matrix corr_matrix = df.corr() fig, ax = plt.subplots(figsize=(10, 6)) sns.heatmap( corr_matrix, cmap=sns.diverging_palette(220, 10, as_cmap=True), annot=True, annot_kws={"fontsize": 7}, ) plt.xticks(rotation=45, ha="right", fontsize=7) plt.yticks(fontsize=7) plt.show() df.shape # Creating a function to return all variables that have high correlation coefficients def get_correlated_variables(dataset, threshold): corr_columns = set()  # To get unique correlated column names corr_matrix = dataset.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if ( abs(corr_matrix.iloc[i, j]) > threshold ):  # Checking the absolute value of the corr coeff column_name = corr_matrix.columns[i]  # Getting the name of the column corr_columns.add(column_name) return corr_columns corr_features = get_correlated_variables(X, 0.8) corr_features # All three models were run on variables selected through a correlation matrix, but they did not give good enough results. This could be because the correlation matrix measures the relationship between pairs of variables. It does not consider the impact they have on a target variable when other variables are included in the model. Due to this, a few variables may appear redundant in a correlation matrix but provide crucial information relevant to the target variable. # Next, we try a wrapper method for variable selection. # ### 3. Bi-Directional Elimination as a wrapper method # Wrapper method: This is a method of variable selection that trains and evaluates a machine learning algorithm on different subsets of features and determines the subset of features that yields the highest performance. # Here we use bi-directional elimination as a wrapper method on the "Random Forest Classifier". This wrapper method combines both forward selection and backward elimination methods. While adding a new feature, it checks the significance of already added features. If it finds any of the already selected features to be insignificant, it simply removes that particular feature through backward elimination. from mlxtend.feature_selection import SequentialFeatureSelector as SFS # Bi-directional Sequential Selection (SFFS) sffs = SFS( RandomForestClassifier(), k_features=(1, len(X.columns)), forward=True, floating=True, scoring="accuracy", cv=5, ) sffs.fit(X, y) selected_features = list(sffs.k_feature_names_) selected_features X_final = X[selected_features] X_final.head() X_final.shape # Back to top # # ## Part 5. HyperParameter Tuning and Model Selection: # Splitting the dataset into train and test X_train, X_test, y_train, y_test = train_test_split( X_final, y, test_size=0.25, random_state=42 ) # Scaling the dataset scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.transform(X_test) # ### 1. 
Linear Classifier sgd_model = SGDClassifier(random_state=42) sgd_model.fit(X_train_scaled, y_train) print("Accuracy on train set: ", sgd_model.score(X_train_scaled, y_train)) print("Accuracy on test set: ", sgd_model.score(X_test_scaled, y_test)) # Defining hyperparameters loss = ["hinge", "log", "squared_hinge", "modified_huber"] penalty = ["l1", "l2", "elasticnet"] alpha = [0.0001, 0.001, 0.01, 0.1] # define grid search grid = dict(loss=loss, penalty=penalty, alpha=alpha) cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=42) grid_search = GridSearchCV( estimator=sgd_model, param_grid=grid, n_jobs=-1, cv=cv, scoring="accuracy", error_score=0, ) # Results grid_result = grid_search.fit(X_train_scaled, y_train) # summarize results print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) sgd_model = SGDClassifier( alpha=grid_result.best_params_["alpha"], loss=grid_result.best_params_["loss"], penalty=grid_result.best_params_["penalty"], random_state=42, ) sgd_model.fit(X_train_scaled, y_train) print( "Accuracy on test set after hyperparameter tuning: ", sgd_model.score(X_test_scaled, y_test), ) y_predict = sgd_model.predict(X_test_scaled) cm = confusion_matrix(y_test, y_predict) plt.figure(figsize=(6, 4)) sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g") plt.xlabel("Predicted", fontsize=10) plt.ylabel("Actual/Observed", fontsize=10) print(classification_report(y_test, y_predict)) # Calculating the evaluation metrics accuracy = accuracy_score(y_test, y_predict) weighted_precision = precision_score(y_test, y_predict, average="weighted") weighted_recall = recall_score(y_test, y_predict, average="weighted") weighted_f1 = f1_score(y_test, y_predict, average="weighted") # Creating an evaluation metrics dataframe for the model results = pd.DataFrame( [ [ "Linear Classifier", round(accuracy * 100, 2), round(weighted_precision * 100, 2), round(weighted_recall * 100, 2), round(weighted_f1 * 100, 2), ] ], columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"], ) results # ### 2. 
KNN Classifier knn_model = KNeighborsClassifier(n_neighbors=7) knn_model.fit(X_train_scaled, y_train) print("Accuracy on train set: ", knn_model.score(X_train_scaled, y_train)) print("Accuracy on test set: ", knn_model.score(X_test_scaled, y_test)) # define grid search grid = {"n_neighbors": range(1, 20), "weights": ["uniform", "distance"], "p": [1, 2]} cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1) grid_search = GridSearchCV( estimator=knn_model, param_grid=grid, n_jobs=-1, cv=cv, scoring="accuracy", error_score=0, verbose=1, ) grid_result = grid_search.fit(X_train_scaled, y_train) # Summarize Results print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) knn_model = KNeighborsClassifier( n_neighbors=grid_result.best_params_["n_neighbors"], p=grid_result.best_params_["p"], weights=grid_result.best_params_["weights"], ) knn_model.fit(X_train_scaled, y_train) print("Accuracy on test set: ", knn_model.score(X_test_scaled, y_test)) y_pred = knn_model.predict(X_test_scaled) cm = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g") plt.xlabel("Predicted", fontsize=10) plt.ylabel("Actual/Observed", fontsize=10) print(classification_report(y_test, y_pred)) # Calculating the evaluation metrics accuracy = accuracy_score(y_test, y_pred) weighted_precision = precision_score(y_test, y_pred, average="weighted") weighted_recall = recall_score(y_test, y_pred, average="weighted") weighted_f1 = f1_score(y_test, y_pred, average="weighted") # Creating an evaluation metrics dataframe for the model lr_results = pd.DataFrame( [ [ "KNN Classifier", round(accuracy * 100, 2), round(weighted_precision * 100, 2), round(weighted_recall * 100, 2), round(weighted_f1 * 100, 2), ] ], columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"], ) # Appending the evaluation metrics into a dataframe results = results.append(lr_results, ignore_index=True) results # ### 3. 
Random Forest rf_model = RandomForestClassifier() rf_model.fit(X_train_scaled, y_train) print("Accuracy on train set: ", rf_model.score(X_train_scaled, y_train)) print("Accuracy on test set: ", rf_model.score(X_test_scaled, y_test)) # define grid search grid = { "bootstrap": [True], "max_features": ["sqrt", "log2"], "max_depth": [10, 20, 30, 40, 50], "n_estimators": [100, 200, 300, 400, 500], } cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1) grid_search = GridSearchCV( estimator=rf_model, param_grid=grid, n_jobs=-1, cv=cv, scoring="accuracy", error_score=0, verbose=1, ) # The grid search must be fit on the training split; fitting it on the test set would leak test data into model selection grid_result = grid_search.fit(X_train_scaled, y_train) # Summarize Results print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) rf_model = RandomForestClassifier( bootstrap=True, max_depth=grid_result.best_params_["max_depth"], max_features=grid_result.best_params_["max_features"], n_estimators=grid_result.best_params_["n_estimators"], ) rf_model.fit(X_train_scaled, y_train) print("Accuracy on test set: ", rf_model.score(X_test_scaled, y_test)) y_pred = rf_model.predict(X_test_scaled) cm = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cm, annot=True, cmap="Spectral", fmt="g") plt.xlabel("Predicted", fontsize=10) plt.ylabel("Actual/Observed", fontsize=10) print(classification_report(y_test, y_pred)) # Calculating the evaluation metrics accuracy = accuracy_score(y_test, y_pred) weighted_precision = precision_score(y_test, y_pred, average="weighted") weighted_recall = recall_score(y_test, y_pred, average="weighted") weighted_f1 = f1_score(y_test, y_pred, average="weighted") # Creating an evaluation metrics dataframe for the model rf_results = pd.DataFrame( [ [ "Random Forest", round(accuracy * 100, 2), round(weighted_precision * 100, 2), round(weighted_recall * 100, 2), round(weighted_f1 * 100, 2), ] ], columns=["Model", "Accuracy", "Precision", "Recall", "F1 Score"], ) # Appending the evaluation metrics into a dataframe results = results.append(rf_results, ignore_index=True) results
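# ### Optional: inspecting feature importances # As a hedged follow-up to the variable-selection discussion above (this check is an addition, not part of the original analysis), the tuned random forest exposes impurity-based feature importances. Plotting them is a quick sanity check that the wrapper-selected variables actually carry signal. This sketch reuses the rf_model and X_final objects defined earlier.
# rf_model was fit on the scaled copy of X_final, so the importance vector
# lines up one-to-one with X_final's columns.
importances = pd.Series(rf_model.feature_importances_, index=X_final.columns)
importances.sort_values().plot(kind="barh", figsize=(8, 5))
plt.xlabel("Impurity-based importance")
plt.title("Tuned random forest: feature importances")
plt.tight_layout()
plt.show()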
false
0
6,133
3
6,348
6,133
129024559
<jupyter_start><jupyter_text>Marketing Campaign ### Context A response model can provide a significant boost to the efficiency of a marketing campaign by increasing responses or reducing expenses. The objective is to predict who will respond to an offer for a product or service. ### Content AcceptedCmp1 - 1 if customer accepted the offer in the 1st campaign, 0 otherwise AcceptedCmp2 - 1 if customer accepted the offer in the 2nd campaign, 0 otherwise AcceptedCmp3 - 1 if customer accepted the offer in the 3rd campaign, 0 otherwise AcceptedCmp4 - 1 if customer accepted the offer in the 4th campaign, 0 otherwise AcceptedCmp5 - 1 if customer accepted the offer in the 5th campaign, 0 otherwise Response (target) - 1 if customer accepted the offer in the last campaign, 0 otherwise Complain - 1 if customer complained in the last 2 years DtCustomer - date of customer’s enrolment with the company Education - customer’s level of education Marital - customer’s marital status Kidhome - number of small children in customer’s household
Teenhome - number of teenagers in customer’s household
Income - customer’s yearly household income MntFishProducts - amount spent on fish products in the last 2 years MntMeatProducts - amount spent on meat products in the last 2 years MntFruits - amount spent on fruit products in the last 2 years MntSweetProducts - amount spent on sweet products in the last 2 years MntWines - amount spent on wine products in the last 2 years MntGoldProds - amount spent on gold products in the last 2 years NumDealsPurchases - number of purchases made with discount NumCatalogPurchases - number of purchases made using catalogue NumStorePurchases - number of purchases made directly in stores NumWebPurchases - number of purchases made through company’s web site NumWebVisitsMonth - number of visits to company’s web site in the last month Recency - number of days since the last purchase Kaggle dataset identifier: arketing-campaign <jupyter_script># # Marketing Campaign Prediction using XGBoost & LightGBM # ## Table of Contents # ### 1. What is XGBoost and LightGBM? # ### 2. Importing Libraries # ### 3. Loading Dataset # ### 4. Data PreProcessing # ### 5. EDA # ### 6. Data Splitting # ### 7. Model Selection and Training # ### 8. Model Evaluation # ### 9. LightGBM # ### 10. Conclusion # ## 1. What is XGBoost & LightGBM? # ### XGBoost # 1. XGBoost (eXtreme Gradient Boosting) is a popular open-source gradient boosting framework that is widely used in machine learning and data mining for both regression and classification problems. XGBoost is a type of ensemble learning algorithm that combines the predictions of several weak learners (i.e., decision trees) to create a strong learner. # # 2. It works by iteratively training decision trees on the residual errors of the previous trees, with each new tree attempting to correct the errors of the previous ones. XGBoost uses a gradient descent algorithm to optimize the objective function, which is a combination of the loss function and a regularization term that helps prevent overfitting. # # 3. XGBoost is known for its high performance, speed, and accuracy, and has been used to win several Kaggle competitions. It also has many advanced features such as early stopping, cross-validation, and the ability to handle missing values, which make it a powerful tool for machine learning practitioners. # # Gradient boosting has three main components. # Loss Function: The loss function estimates how well the model makes predictions on the given data. It varies depending on the type of problem. # # Weak Learner: A weak learner is a model that performs only slightly better than random guessing. The weak learners are mostly decision trees, but other models can be used in GBM. # # Additive Model: It is an iterative and sequential process of adding the decision trees one step at a time. Each iteration should reduce the value of the loss function. A fixed number of trees are added, or training stops once the loss reaches an acceptable level or no longer improves on an external validation dataset. # ### LightGBM # 1. LightGBM is an open-source gradient boosting framework that was developed by Microsoft and is designed to be efficient, scalable, and accurate for large-scale machine learning tasks. It is a faster and more memory-efficient implementation of gradient boosting than XGBoost and is specifically designed to work well with large datasets.
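# Before turning to LightGBM's specifics, here is a small, hedged sketch of the XGBoost features mentioned above: a regularization term and early stopping against a validation set. It uses synthetic data and illustrative, untuned parameter values; the early_stopping_rounds constructor argument assumes a reasonably recent xgboost release (>= 1.6).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

# Synthetic binary-classification data, purely for illustration
X_demo, y_demo = make_classification(n_samples=1000, n_features=10, random_state=0)
X_tr, X_val, y_tr, y_val = train_test_split(X_demo, y_demo, test_size=0.2, random_state=0)

demo_clf = XGBClassifier(
    n_estimators=500,          # upper bound on boosting rounds
    learning_rate=0.05,
    max_depth=4,
    reg_lambda=1.0,            # L2 regularization on leaf weights
    eval_metric="logloss",
    early_stopping_rounds=20,  # stop once validation loss stalls for 20 rounds
)
demo_clf.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], verbose=False)
print("Boosting stopped at iteration:", demo_clf.best_iteration)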
# # Here are some key features and concepts of LightGBM: # Gradient-based One-Side Sampling (GOSS): GOSS is a technique used in LightGBM to reduce the number of data instances used in each boosting iteration. It keeps a subset of data instances with large gradients and samples the rest of the instances with smaller gradients. This helps to speed up the training process while maintaining good accuracy. # # Exclusive Feature Bundling (EFB): EFB is another optimization technique used in LightGBM to reduce the effective number of features. It bundles mutually exclusive features (sparse features that are rarely nonzero at the same time) into a single feature. This reduces the memory consumption and improves the training speed. # # Leaf-wise Tree Growth: LightGBM uses a leaf-wise tree growth strategy instead of the traditional level-wise strategy. In leaf-wise growth, the algorithm chooses the leaf node that reduces the loss the most in each iteration. This approach can lead to a more complex tree structure and better accuracy, but it may also result in overfitting if not properly controlled. # # LightGBM Datasets: LightGBM introduces its own data structure called Dataset to efficiently handle and process large datasets. The Dataset can be constructed from various sources such as NumPy arrays, Pandas dataframes, or CSV files. It provides features like data partitioning, parallel data loading, and efficient memory storage. # Parallel and GPU Learning: LightGBM supports parallel and GPU learning, which allows it to leverage multi-core processors and GPUs to speed up the training process. This is particularly beneficial when dealing with large-scale datasets. # Regularization and Control Parameters: LightGBM provides various regularization techniques such as L1 and L2 regularization to prevent overfitting. It also offers control parameters to adjust the learning rate, tree depth, number of leaves, and other aspects of the boosting process. # ## 2. Import Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import datetime import openpyxl from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from xgboost import XGBClassifier import lightgbm as lgb # ## 3. Loading Dataset df = pd.read_excel("/kaggle/input/arketing-campaign/marketing_campaign.xlsx") df df.shape df.info() # ## 4. Data Preprocessing df.isnull().sum() # fill the missing Income values with the column mean df["Income"] = df["Income"].fillna(df["Income"].mean()) df.isnull().sum() df["Income"].value_counts() # # remove these two variables # df = df.drop(['Z_CostContact','Z_Revenue'], axis=1) df["Education"].unique() df.head() df.describe() # ## 5. 
EDA # fetch age from Year_Birth df.Year_Birth = pd.to_datetime(df["Year_Birth"], format="%Y") year_now = datetime.date.today().year df["Age"] = df["Year_Birth"].apply(lambda x: year_now - x.year) df.drop("Year_Birth", axis=1, inplace=True) # Income and spending by age fig, ax = plt.subplots(1, 2, figsize=(18, 4)) sns.scatterplot(x="Age", y="Income", data=df, hue="Response", ax=ax[0]) sns.boxplot(x="Age", data=df, ax=ax[1]) plt.show() # Seems there are a few outliers in Age as well as Income # Income for most of the population is under 100k # Most of the people in the dataset are aged between 45 and 65 # calculate customers' spending df["spending"] = ( df.MntFishProducts + df.MntFruits + df.MntGoldProds + df.MntMeatProducts + df.MntSweetProducts + df.MntWines ) # dropping spending on each product df.drop( [ "MntFishProducts", "MntFruits", "MntGoldProds", "MntMeatProducts", "MntSweetProducts", "MntWines", ], axis=1, inplace=True, ) # Income and spending fig, ax = plt.subplots(1, 2, figsize=(16, 4)) sns.scatterplot(x="Income", y="spending", data=df, hue="Response", ax=ax[0]) sns.histplot(df.spending, ax=ax[1]) plt.show() # Most of the people who responded have high spending # The number of people whose spending is less than 200 is high # Income and spending by education fig, ax = plt.subplots(2, 2, figsize=(14, 8)) sns.barplot(x="Education", y="Income", data=df, ax=ax[0, 0]) sns.boxplot(x="Education", y="Income", data=df, ax=ax[0, 1]) sns.barplot(x="Education", y="spending", data=df, ax=ax[1, 0]) sns.boxplot(x="Education", y="spending", data=df, ax=ax[1, 1]) plt.show() # Graduates' spending is higher relative to their income # convert the date of enrolment to datetime df.Dt_Customer = pd.to_datetime(df.Dt_Customer) # creating features from date of enrolment df["Year_Customer"] = df["Dt_Customer"].apply(lambda x: x.year) df["Month_customer"] = df["Dt_Customer"].apply(lambda x: x.month) df["Day_customer"] = df["Dt_Customer"].apply(lambda x: x.day) df = df.drop("Dt_Customer", axis=1) fig, ax = plt.subplots(3, 2, figsize=(18, 12)) sns.barplot(x="Year_Customer", y="Income", data=df, ax=ax[0, 0]) sns.barplot(x="Year_Customer", y="spending", data=df, ax=ax[0, 1]) sns.barplot(x="Month_customer", y="Income", data=df, ax=ax[1, 0]) sns.barplot(x="Month_customer", y="spending", data=df, ax=ax[1, 1]) sns.barplot(x="Day_customer", y="Income", data=df, ax=ax[2, 0]) sns.barplot(x="Day_customer", y="spending", data=df, ax=ax[2, 1]) plt.show() # Year over year the income has increased; however, the spending has decreased df.columns # ## Label Encoding from sklearn import preprocessing label_encoder = preprocessing.LabelEncoder() df["Education"] = label_encoder.fit_transform(df["Education"]) df["Marital_Status"] = label_encoder.fit_transform(df["Marital_Status"]) # ## 6. Data Splitting x = df.drop("Response", axis=1) y = df.Response # shape of X and y print("Shape of x: ", x.shape) print("Shape of y: ", y.shape) # ## 7. Model Selection and Training X_train, X_test, Y_train, Y_test = train_test_split( x, y, test_size=0.3, random_state=10 ) print(X_train) model = XGBClassifier() model.fit(X_train._get_numeric_data(), Y_train) pred = model.predict(X_test._get_numeric_data()) # ## 8. 
Model Evaluation # Note: sklearn's scoring functions expect (y_true, y_pred) in that order print("Accuracy Score: ", accuracy_score(Y_test, pred) * 100) print("Precision Score: ", precision_score(Y_test, pred) * 100) print("F1 Score: ", f1_score(Y_test, pred) * 100) print("Recall Score: ", recall_score(Y_test, pred) * 100) print(classification_report(Y_test, pred)) # ## Confusion Matrix cm = confusion_matrix(Y_test, pred) cm ax = sns.heatmap(cm / np.sum(cm), annot=True, fmt=".2%", cmap="Blues") ax.set_title("Confusion Matrix with labels\n\n") ax.set_xlabel("\nPredicted Values") ax.set_ylabel("Actual Values ") plt.show() # ## 9. LightGBM df.info() lgb_model = lgb.LGBMClassifier() lgb_model.fit(X_train, Y_train) lgb_acc = lgb_model.score(X_test, Y_test) lgb_acc # use the fitted LightGBM model (not the XGBoost model) for the predictions y_pred_lgb = lgb_model.predict(X_test) print(y_pred_lgb) cm = confusion_matrix(Y_test, y_pred_lgb) cm ax = sns.heatmap(cm, annot=True, cmap="Blues") ax.set_title("Confusion Matrix with labels\n\n") ax.set_xlabel("\nPredicted Values") ax.set_ylabel("Actual Values ") plt.show()
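# ### Optional: LightGBM's native Dataset API # A hedged sketch tying back to the concepts described in the introduction: the lgb.Dataset structure plus lgb.train with the control parameters mentioned there (number of leaves, learning rate, L1/L2 regularization). Parameter values are illustrative, not tuned.
# Wrap the existing train/test splits in LightGBM's own data structure
train_set = lgb.Dataset(X_train, label=Y_train)
valid_set = lgb.Dataset(X_test, label=Y_test, reference=train_set)

params = {
    "objective": "binary",
    "metric": "binary_logloss",
    "num_leaves": 31,       # caps leaf-wise tree growth
    "learning_rate": 0.05,
    "lambda_l1": 0.1,       # L1 regularization
    "lambda_l2": 0.1,       # L2 regularization
    "verbose": -1,
}
booster = lgb.train(params, train_set, num_boost_round=200, valid_sets=[valid_set])
y_prob = booster.predict(X_test)  # probability of class 1 for a binary objective
print("Native-API accuracy: ", accuracy_score(Y_test, (y_prob > 0.5).astype(int)) * 100)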
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024559.ipynb
arketing-campaign
rodsaldanha
[{"Id": 129024559, "ScriptId": 38354406, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11063747, "CreationDate": "05/10/2023 11:48:12", "VersionNumber": 1.0, "Title": "Project - XGBoost & LightGBM", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 252.0, "LinesInsertedFromPrevious": 252.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
[{"Id": 184715949, "KernelVersionId": 129024559, "SourceDatasetVersionId": 1139908}]
[{"Id": 1139908, "DatasetId": 178248, "DatasourceVersionId": 1170542, "CreatorUserId": 516318, "LicenseName": "Other (specified in description)", "CreationDate": "05/08/2020 13:14:32", "VersionNumber": 8.0, "Title": "Marketing Campaign", "Slug": "arketing-campaign", "Subtitle": "Boost the profit of a marketing campaign", "Description": "### Context\n\nA response model can provide a significant boost to the efficiency of a marketing campaign by increasing responses or reducing expenses. The objective is to predict who will respond to an offer for a product or service\n\n\n### Content\n\nAcceptedCmp1 - 1 if customer accepted the offer in the 1st campaign, 0 otherwise \nAcceptedCmp2 - 1 if customer accepted the offer in the 2nd campaign, 0 otherwise \nAcceptedCmp3 - 1 if customer accepted the offer in the 3rd campaign, 0 otherwise \nAcceptedCmp4 - 1 if customer accepted the offer in the 4th campaign, 0 otherwise \nAcceptedCmp5 - 1 if customer accepted the offer in the 5th campaign, 0 otherwise \nResponse (target) - 1 if customer accepted the offer in the last campaign, 0 otherwise \nComplain - 1 if customer complained in the last 2 years\nDtCustomer - date of customer\u2019s enrolment with the company\nEducation - customer\u2019s level of education\nMarital - customer\u2019s marital status\nKidhome - number of small children in customer\u2019s household\n\u2028Teenhome - number of teenagers in customer\u2019s household\n\u2028Income - customer\u2019s yearly household income\nMntFishProducts - amount spent on fish products in the last 2 years\nMntMeatProducts - amount spent on meat products in the last 2 years\nMntFruits - amount spent on fruits products in the last 2 years\nMntSweetProducts - amount spent on sweet products in the last 2 years\nMntWines - amount spent on wine products in the last 2 years\nMntGoldProds - amount spent on gold products in the last 2 years\nNumDealsPurchases - number of purchases made with discount\nNumCatalogPurchases - number of purchases made using catalogue\nNumStorePurchases - number of purchases made directly in stores\nNumWebPurchases - number of purchases made through company\u2019s web site\nNumWebVisitsMonth - number of visits to company\u2019s web site in the last month\nRecency - number of days since the last purchase \n\n\n### Acknowledgements\n\nO. Parr-Rud. Business Analytics Using SAS Enterprise Guide and SAS Enterprise Miner. SAS Institute, 2014.\n\n\n### Inspiration\n\nThe main objective is to train a predictive model which allows the company to maximize the profit of the next marketing campaign.", "VersionNotes": "Fixed csv issue", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 178248, "CreatorUserId": 516318, "OwnerUserId": 516318.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1139908.0, "CurrentDatasourceVersionId": 1170542.0, "ForumId": 189027, "Type": 2, "CreationDate": "04/29/2019 14:27:27", "LastActivityDate": "04/29/2019", "TotalViews": 189232, "TotalDownloads": 23020, "TotalVotes": 269, "TotalKernels": 66}]
[{"Id": 516318, "UserName": "rodsaldanha", "DisplayName": "Rodolfo Saldanha", "RegisterDate": "01/29/2016", "PerformanceTier": 1}]
false
0
2,895
6
3,397
2,895
129024374
<jupyter_start><jupyter_text>Stroke Prediction Dataset ### Similar Datasets - [**HIGHLIGHTED**] CERN Electron Collision Data ☄️[LINK](https://www.kaggle.com/datasets/fedesoriano/cern-electron-collision-data) - Hepatitis C Dataset: [LINK](https://www.kaggle.com/fedesoriano/hepatitis-c-dataset) - Body Fat Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/body-fat-prediction-dataset) - Cirrhosis Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/cirrhosis-prediction-dataset) - Heart Failure Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/heart-failure-prediction) - Stellar Classification Dataset - SDSS17: [LINK](https://www.kaggle.com/fedesoriano/stellar-classification-dataset-sdss17) - Wind Speed Prediction Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/wind-speed-prediction-dataset) - Spanish Wine Quality Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/spanish-wine-quality-dataset) ### Context According to the World Health Organization (WHO) stroke is the 2nd leading cause of death globally, responsible for approximately 11% of total deaths. This dataset is used to predict whether a patient is likely to have a stroke based on input parameters like gender, age, various diseases, and smoking status. Each row in the data provides relevant information about the patient. ### Attribute Information 1) id: unique identifier 2) gender: "Male", "Female" or "Other" 3) age: age of the patient 4) hypertension: 0 if the patient doesn't have hypertension, 1 if the patient has hypertension 5) heart\_disease: 0 if the patient doesn't have any heart diseases, 1 if the patient has a heart disease 6) ever\_married: "No" or "Yes" 7) work\_type: "children", "Govt\_job", "Never\_worked", "Private" or "Self-employed" 8) Residence\_type: "Rural" or "Urban" 9) avg\_glucose\_level: average glucose level in blood 10) bmi: body mass index 11) smoking\_status: "formerly smoked", "never smoked", "smokes" or "Unknown"* 12) stroke: 1 if the patient had a stroke or 0 if not *Note: "Unknown" in smoking\_status means that the information is unavailable for this patient Kaggle dataset identifier: stroke-prediction-dataset <jupyter_code>import pandas as pd df = pd.read_csv('stroke-prediction-dataset/healthcare-dataset-stroke-data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 5110 entries, 0 to 5109 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 5110 non-null int64 1 gender 5110 non-null object 2 age 5110 non-null float64 3 hypertension 5110 non-null int64 4 heart_disease 5110 non-null int64 5 ever_married 5110 non-null object 6 work_type 5110 non-null object 7 Residence_type 5110 non-null object 8 avg_glucose_level 5110 non-null float64 9 bmi 4909 non-null float64 10 smoking_status 5110 non-null object 11 stroke 5110 non-null int64 dtypes: float64(3), int64(4), object(5) memory usage: 479.2+ KB <jupyter_text>Examples: { "id": 9046, "gender": "Male", "age": 67, "hypertension": 0, "heart_disease": 1, "ever_married": "Yes", "work_type": "Private", "Residence_type": "Urban", "avg_glucose_level": 228.69, "bmi": 36.6, "smoking_status": "formerly smoked", "stroke": 1 } { "id": 51676, "gender": "Female", "age": 61, "hypertension": 0, "heart_disease": 0, "ever_married": "Yes", "work_type": "Self-employed", "Residence_type": "Rural", "avg_glucose_level": 202.21, "bmi": NaN, "smoking_status": "never smoked", "stroke": 1 } { "id": 31112, "gender": "Male", "age": 80, "hypertension": 0, 
"heart_disease": 1, "ever_married": "Yes", "work_type": "Private", "Residence_type": "Rural", "avg_glucose_level": 105.92, "bmi": 32.5, "smoking_status": "never smoked", "stroke": 1 } { "id": 60182, "gender": "Female", "age": 49, "hypertension": 0, "heart_disease": 0, "ever_married": "Yes", "work_type": "Private", "Residence_type": "Urban", "avg_glucose_level": 171.23, "bmi": 34.4, "smoking_status": "smokes", "stroke": 1 } <jupyter_script># # Imports import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import plotly.express as px # # Data df = pd.read_csv( "/kaggle/input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv" ) df.head() # # Data Preprocessing df.shape df.info() df.drop("id", axis=1, inplace=True) df.isna().sum() df.fillna(df["bmi"].mean(), inplace=True) df.info() df.nunique() # # Visualization columns_list = list(df.columns) plt.figure(figsize=(12, 20)) for i in range(len(columns_list)): plt.subplot(5, 3, i + 1) plt.title(columns_list[i]) plt.xticks(rotation=45) plt.hist(df[columns_list[i]]) plt.tight_layout() # ## Age plt.figure(figsize=(10, 3)) sns.boxplot(x="age", data=df) # ## Average Glucose Level plt.figure(figsize=(10, 3)) sns.boxplot(x="avg_glucose_level", data=df) # ## AVC Percent qtd_stroke = df["stroke"].value_counts() plt.pie(qtd_stroke, labels=["Stroke", "No Stroke"], autopct="%.3f%%", explode=[0, 0.2]) # ## Stroke x Married qtd_casado = df["ever_married"].value_counts() plt.pie( qtd_casado, labels=["Already Married", "Never Married"], autopct="%.3f%%", explode=[0, 0.03], ) # ## BMI by Age plt.figure(figsize=(10, 5)) sns.histplot(df["age"], color="red", label="BMI", kde=True) plt.legend() plt.grid() # ## AVC por IDADE plt.figure(figsize=(10, 5)) sns.histplot(df["age"], color="red", label="stroke", kde=True) plt.legend() plt.grid() ## Age x BMI x Average Glucose Level fig = px.scatter_3d( df, x="age", y="bmi", z="avg_glucose_level", color="stroke", color_discrete_sequence=px.colors.qualitative.Bold, width=1200, height=1200, ) fig.show() # ## Convert Categorical Columns to Numeric col_categ = df.select_dtypes(include=["object"]).columns.tolist() from sklearn.preprocessing import LabelEncoder le = LabelEncoder() for c in col_categ: le.fit(df[c]) df[c] = le.transform(df[c]) df.head() # ## Split Train and Test X = df.drop("stroke", axis=1) y = df["stroke"] from sklearn.model_selection import train_test_split xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.25, random_state=0) # ## Data Balancing from imblearn.over_sampling import SMOTE smt = SMOTE(random_state=0) xtrain_res, ytrain_res = smt.fit_resample(xtrain, ytrain) print("Distribuição ANTES do balanceamento: ", ytrain.value_counts()) print("Distribuição DEPOIS do balanceamento: ", ytrain_res.value_counts()) # ## Comparing models # Concat train_df = pd.concat([xtrain_res, ytrain_res], axis=1) from pycaret.classification import * s = setup(data=train_df, target="stroke", session_id=0, normalize=True) compare_models() xgb = create_model("xgboost") # ## Predicts preds = predict_model(xgb) preds
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024374.ipynb
stroke-prediction-dataset
fedesoriano
[{"Id": 129024374, "ScriptId": 35972489, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12038039, "CreationDate": "05/10/2023 11:46:18", "VersionNumber": 5.0, "Title": "\ud83e\ude7a Stroke Predict \u2764\ufe0f", "EvaluationDate": "05/10/2023", "IsChange": false, "TotalLines": 136.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 136.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
[{"Id": 184715620, "KernelVersionId": 129024374, "SourceDatasetVersionId": 1882037}]
[{"Id": 1882037, "DatasetId": 1120859, "DatasourceVersionId": 1920174, "CreatorUserId": 6402661, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/26/2021 19:29:28", "VersionNumber": 1.0, "Title": "Stroke Prediction Dataset", "Slug": "stroke-prediction-dataset", "Subtitle": "11 clinical features for predicting stroke events", "Description": "### Similar Datasets\n\n- [**HIGHLIGHTED**] CERN Electron Collision Data \u2604\ufe0f[LINK](https://www.kaggle.com/datasets/fedesoriano/cern-electron-collision-data)\n- Hepatitis C Dataset: [LINK](https://www.kaggle.com/fedesoriano/hepatitis-c-dataset)\n- Body Fat Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/body-fat-prediction-dataset)\n- Cirrhosis Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/cirrhosis-prediction-dataset)\n- Heart Failure Prediction Dataset: [LINK](https://www.kaggle.com/fedesoriano/heart-failure-prediction)\n- Stellar Classification Dataset - SDSS17: [LINK](https://www.kaggle.com/fedesoriano/stellar-classification-dataset-sdss17)\n- Wind Speed Prediction Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/wind-speed-prediction-dataset)\n- Spanish Wine Quality Dataset: [LINK](https://www.kaggle.com/datasets/fedesoriano/spanish-wine-quality-dataset)\n\n\n### Context\n\nAccording to the World Health Organization (WHO) stroke is the 2nd leading cause of death globally, responsible for approximately 11% of total deaths. \nThis dataset is used to predict whether a patient is likely to get stroke based on the input parameters like gender, age, various diseases, and smoking status. Each row in the data provides relavant information about the patient.\n\n\n### Attribute Information\n\n1) id: unique identifier\n2) gender: \"Male\", \"Female\" or \"Other\"\n3) age: age of the patient\n4) hypertension: 0 if the patient doesn't have hypertension, 1 if the patient has hypertension\n5) heart\\_disease: 0 if the patient doesn't have any heart diseases, 1 if the patient has a heart disease\n6) ever\\_married: \"No\" or \"Yes\"\n7) work\\_type: \"children\", \"Govt\\_jov\", \"Never\\_worked\", \"Private\" or \"Self-employed\"\n8) Residence\\_type: \"Rural\" or \"Urban\"\n9) avg\\_glucose\\_level: average glucose level in blood\n10) bmi: body mass index\n11) smoking\\_status: \"formerly smoked\", \"never smoked\", \"smokes\" or \"Unknown\"*\n12) stroke: 1 if the patient had a stroke or 0 if not\n*Note: \"Unknown\" in smoking\\_status means that the information is unavailable for this patient\n\n### Acknowledgements\n\n**(Confidential Source)** - *Use only for educational purposes*\nIf you use this dataset in your research, please credit the author.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1120859, "CreatorUserId": 6402661, "OwnerUserId": 6402661.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1882037.0, "CurrentDatasourceVersionId": 1920174.0, "ForumId": 1138221, "Type": 2, "CreationDate": "01/26/2021 19:29:28", "LastActivityDate": "01/26/2021", "TotalViews": 1035230, "TotalDownloads": 137380, "TotalVotes": 2710, "TotalKernels": 1002}]
[{"Id": 6402661, "UserName": "fedesoriano", "DisplayName": "fedesoriano", "RegisterDate": "12/18/2020", "PerformanceTier": 4}]
[{"stroke-prediction-dataset/healthcare-dataset-stroke-data.csv": {"column_names": "[\"id\", \"gender\", \"age\", \"hypertension\", \"heart_disease\", \"ever_married\", \"work_type\", \"Residence_type\", \"avg_glucose_level\", \"bmi\", \"smoking_status\", \"stroke\"]", "column_data_types": "{\"id\": \"int64\", \"gender\": \"object\", \"age\": \"float64\", \"hypertension\": \"int64\", \"heart_disease\": \"int64\", \"ever_married\": \"object\", \"work_type\": \"object\", \"Residence_type\": \"object\", \"avg_glucose_level\": \"float64\", \"bmi\": \"float64\", \"smoking_status\": \"object\", \"stroke\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5110 entries, 0 to 5109\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 5110 non-null int64 \n 1 gender 5110 non-null object \n 2 age 5110 non-null float64\n 3 hypertension 5110 non-null int64 \n 4 heart_disease 5110 non-null int64 \n 5 ever_married 5110 non-null object \n 6 work_type 5110 non-null object \n 7 Residence_type 5110 non-null object \n 8 avg_glucose_level 5110 non-null float64\n 9 bmi 4909 non-null float64\n 10 smoking_status 5110 non-null object \n 11 stroke 5110 non-null int64 \ndtypes: float64(3), int64(4), object(5)\nmemory usage: 479.2+ KB\n", "summary": "{\"id\": {\"count\": 5110.0, \"mean\": 36517.82935420744, \"std\": 21161.721624827165, \"min\": 67.0, \"25%\": 17741.25, \"50%\": 36932.0, \"75%\": 54682.0, \"max\": 72940.0}, \"age\": {\"count\": 5110.0, \"mean\": 43.226614481409, \"std\": 22.61264672311349, \"min\": 0.08, \"25%\": 25.0, \"50%\": 45.0, \"75%\": 61.0, \"max\": 82.0}, \"hypertension\": {\"count\": 5110.0, \"mean\": 0.0974559686888454, \"std\": 0.29660667423379117, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"heart_disease\": {\"count\": 5110.0, \"mean\": 0.05401174168297456, \"std\": 0.22606298750336543, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"avg_glucose_level\": {\"count\": 5110.0, \"mean\": 106.1476771037182, \"std\": 45.28356015058198, \"min\": 55.12, \"25%\": 77.245, \"50%\": 91.88499999999999, \"75%\": 114.09, \"max\": 271.74}, \"bmi\": {\"count\": 4909.0, \"mean\": 28.893236911794666, \"std\": 7.854066729680164, \"min\": 10.3, \"25%\": 23.5, \"50%\": 28.1, \"75%\": 33.1, \"max\": 97.6}, \"stroke\": {\"count\": 5110.0, \"mean\": 0.0487279843444227, \"std\": 0.2153198569802376, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}}", "examples": "{\"id\":{\"0\":9046,\"1\":51676,\"2\":31112,\"3\":60182},\"gender\":{\"0\":\"Male\",\"1\":\"Female\",\"2\":\"Male\",\"3\":\"Female\"},\"age\":{\"0\":67.0,\"1\":61.0,\"2\":80.0,\"3\":49.0},\"hypertension\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"heart_disease\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0},\"ever_married\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"work_type\":{\"0\":\"Private\",\"1\":\"Self-employed\",\"2\":\"Private\",\"3\":\"Private\"},\"Residence_type\":{\"0\":\"Urban\",\"1\":\"Rural\",\"2\":\"Rural\",\"3\":\"Urban\"},\"avg_glucose_level\":{\"0\":228.69,\"1\":202.21,\"2\":105.92,\"3\":171.23},\"bmi\":{\"0\":36.6,\"1\":null,\"2\":32.5,\"3\":34.4},\"smoking_status\":{\"0\":\"formerly smoked\",\"1\":\"never smoked\",\"2\":\"never smoked\",\"3\":\"smokes\"},\"stroke\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
true
1
<start_data_description><data_path>stroke-prediction-dataset/healthcare-dataset-stroke-data.csv: <column_names> ['id', 'gender', 'age', 'hypertension', 'heart_disease', 'ever_married', 'work_type', 'Residence_type', 'avg_glucose_level', 'bmi', 'smoking_status', 'stroke'] <column_types> {'id': 'int64', 'gender': 'object', 'age': 'float64', 'hypertension': 'int64', 'heart_disease': 'int64', 'ever_married': 'object', 'work_type': 'object', 'Residence_type': 'object', 'avg_glucose_level': 'float64', 'bmi': 'float64', 'smoking_status': 'object', 'stroke': 'int64'} <dataframe_Summary> {'id': {'count': 5110.0, 'mean': 36517.82935420744, 'std': 21161.721624827165, 'min': 67.0, '25%': 17741.25, '50%': 36932.0, '75%': 54682.0, 'max': 72940.0}, 'age': {'count': 5110.0, 'mean': 43.226614481409, 'std': 22.61264672311349, 'min': 0.08, '25%': 25.0, '50%': 45.0, '75%': 61.0, 'max': 82.0}, 'hypertension': {'count': 5110.0, 'mean': 0.0974559686888454, 'std': 0.29660667423379117, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'heart_disease': {'count': 5110.0, 'mean': 0.05401174168297456, 'std': 0.22606298750336543, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'avg_glucose_level': {'count': 5110.0, 'mean': 106.1476771037182, 'std': 45.28356015058198, 'min': 55.12, '25%': 77.245, '50%': 91.88499999999999, '75%': 114.09, 'max': 271.74}, 'bmi': {'count': 4909.0, 'mean': 28.893236911794666, 'std': 7.854066729680164, 'min': 10.3, '25%': 23.5, '50%': 28.1, '75%': 33.1, 'max': 97.6}, 'stroke': {'count': 5110.0, 'mean': 0.0487279843444227, 'std': 0.2153198569802376, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}} <dataframe_info> RangeIndex: 5110 entries, 0 to 5109 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 5110 non-null int64 1 gender 5110 non-null object 2 age 5110 non-null float64 3 hypertension 5110 non-null int64 4 heart_disease 5110 non-null int64 5 ever_married 5110 non-null object 6 work_type 5110 non-null object 7 Residence_type 5110 non-null object 8 avg_glucose_level 5110 non-null float64 9 bmi 4909 non-null float64 10 smoking_status 5110 non-null object 11 stroke 5110 non-null int64 dtypes: float64(3), int64(4), object(5) memory usage: 479.2+ KB <some_examples> {'id': {'0': 9046, '1': 51676, '2': 31112, '3': 60182}, 'gender': {'0': 'Male', '1': 'Female', '2': 'Male', '3': 'Female'}, 'age': {'0': 67.0, '1': 61.0, '2': 80.0, '3': 49.0}, 'hypertension': {'0': 0, '1': 0, '2': 0, '3': 0}, 'heart_disease': {'0': 1, '1': 0, '2': 1, '3': 0}, 'ever_married': {'0': 'Yes', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'work_type': {'0': 'Private', '1': 'Self-employed', '2': 'Private', '3': 'Private'}, 'Residence_type': {'0': 'Urban', '1': 'Rural', '2': 'Rural', '3': 'Urban'}, 'avg_glucose_level': {'0': 228.69, '1': 202.21, '2': 105.92, '3': 171.23}, 'bmi': {'0': 36.6, '1': None, '2': 32.5, '3': 34.4}, 'smoking_status': {'0': 'formerly smoked', '1': 'never smoked', '2': 'never smoked', '3': 'smokes'}, 'stroke': {'0': 1, '1': 1, '2': 1, '3': 1}} <end_description>
1,010
7
2,529
1,010
129024743
from sklearn import datasets
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt

# Load the digits dataset
digits = datasets.load_digits()
# Display the last digit in this dataset
plt.figure(1, figsize=(3, 3))
plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation="nearest")
plt.show()
X, y = datasets.load_digits(return_X_y=True)
X2, y2 = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
# # **Classification**
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

# **For dataset (a)**
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=53
)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# The MLPClassifier class implements the multi-layer perceptron (MLP) algorithm, which is trained with backpropagation.
# The MLP is trained on two arrays: array X, which holds the training samples represented as floating-point feature vectors, and array y, which holds the target values (class labels) for the training samples.
# **For dataset (b)**
X2_train, X2_test, y2_train, y2_test = train_test_split(
    X2, y2, test_size=0.3, random_state=53
)
clf = MLPClassifier(random_state=1)
clf.fit(X2_train, y2_train)
clf.score(X2_test, y2_test)
# **For dataset (a)**
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# **For dataset (b)**
scaler = StandardScaler()
scaler.fit(X2_train)
X2_train = scaler.transform(X2_train)
X2_test = scaler.transform(X2_test)
clf = MLPClassifier(random_state=1)
clf.fit(X2_train, y2_train)
clf.score(X2_test, y2_test)
# # **REGRESSION**
# **For dataset (a)**
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler

scaler_mm = MinMaxScaler()
X_train = scaler_mm.fit_transform(X_train)
X_test = scaler_mm.transform(X_test)
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

reg = MLPRegressor(max_iter=300)
reg.fit(X_train, y_train)
reg.score(X_test, y_test)
# The MLPRegressor class implements a multi-layer perceptron (MLP) that is trained with backpropagation and has no activation function in the output layer, which can also be viewed as using the identity function as the activation function.
# Consequently, it uses the squared error as the loss function, and its output is a set of continuous values.
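# A small sketch (an addition, not part of the original assignment): the regressor's
# continuous outputs can be rounded and clipped back to the 0-9 label range to get a
# rough accuracy figure that is comparable with the classifiers above.
import numpy as np
from sklearn.metrics import accuracy_score

pred_rounded = np.clip(np.round(reg.predict(X_test)), 0, 9)
print("rounded-regression accuracy:", accuracy_score(y_test, pred_rounded))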
# **For dataset (b)**
import numpy as np

X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.3)
# fetch_openml returns the MNIST labels as strings; cast them to floats for regression
y2_train = y2_train.astype(np.float64)
y2_test = y2_test.astype(np.float64)
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler

scaler_mm = MinMaxScaler()
X2_train = scaler_mm.fit_transform(X2_train)
X2_test = scaler_mm.transform(X2_test)
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

reg = MLPRegressor(max_iter=300)
reg.fit(X2_train, y2_train)
reg.score(X2_test, y2_test)
# # **Even vs. odd**
# **For dataset (a)**
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

# Load the digits dataset
X, y = datasets.load_digits(return_X_y=True)
# Create class labels for even and odd digits
y_binary = [1 if val % 2 != 0 else 0 for val in y]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y_binary, test_size=0.2, random_state=42
)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# **For dataset (b)**
from sklearn.datasets import load_digits
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
import numpy as np

# Load the MNIST dataset
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
y = y.astype(np.uint8)
# Create class labels for even and odd digits
y_binary = [1 if val % 2 != 0 else 0 for val in y]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y_binary, test_size=0.2, random_state=42
)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# # **Zero vs. all other digits**
# **For dataset (a)**
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

# Load the digits dataset
X, y = datasets.load_digits(return_X_y=True)
# Create class labels for 0 versus the remaining digits
y_binary = [1 if i == 0 else 0 for i in y]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y_binary, test_size=0.2, random_state=42
)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# **For dataset (b)**
from sklearn.datasets import load_digits
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
import numpy as np

# Load the MNIST dataset
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
y = y.astype(np.uint8)
# Create class labels for 0 versus the remaining digits
y_binary = [1 if i == 0 else 0 for i in y]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y_binary, test_size=0.2, random_state=42
)
clf = MLPClassifier(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
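# A minimal evaluation sketch (an addition): in the 0-vs-rest task only roughly 10% of
# the labels are positive, so accuracy alone can be misleading; a confusion matrix and
# per-class report give a fuller picture (standard scikit-learn API).
from sklearn.metrics import confusion_matrix, classification_report

y_pred = clf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))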
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024743.ipynb
null
null
[{"Id": 129024743, "ScriptId": 38352406, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6692497, "CreationDate": "05/10/2023 11:50:06", "VersionNumber": 1.0, "Title": "Task6", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 220.0, "LinesInsertedFromPrevious": 220.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,342
0
2,342
2,342
129024099
<jupyter_start><jupyter_text>Car Price Prediction Multiple Linear Regression ### Problem Statement A Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts. They have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know: Which variables are significant in predicting the price of a car How well those variables describe the price of a car Based on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market. ### Business Goal We are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market. ### Please Note : The dataset provided is for learning purpose. Please don’t draw any inference with real world scenario. Kaggle dataset identifier: car-price-prediction <jupyter_code>import pandas as pd df = pd.read_csv('car-price-prediction/CarPrice_Assignment.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 205 entries, 0 to 204 Data columns (total 26 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 car_ID 205 non-null int64 1 symboling 205 non-null int64 2 CarName 205 non-null object 3 fueltype 205 non-null object 4 aspiration 205 non-null object 5 doornumber 205 non-null object 6 carbody 205 non-null object 7 drivewheel 205 non-null object 8 enginelocation 205 non-null object 9 wheelbase 205 non-null float64 10 carlength 205 non-null float64 11 carwidth 205 non-null float64 12 carheight 205 non-null float64 13 curbweight 205 non-null int64 14 enginetype 205 non-null object 15 cylindernumber 205 non-null object 16 enginesize 205 non-null int64 17 fuelsystem 205 non-null object 18 boreratio 205 non-null float64 19 stroke 205 non-null float64 20 compressionratio 205 non-null float64 21 horsepower 205 non-null int64 22 peakrpm 205 non-null int64 23 citympg 205 non-null int64 24 highwaympg 205 non-null int64 25 price 205 non-null float64 dtypes: float64(8), int64(8), object(10) memory usage: 41.8+ KB <jupyter_text>Examples: { "car_ID": 1, "symboling": 3, "CarName": "alfa-romero giulia", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "convertible", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 88.6, "carlength": 168.8, "carwidth": 64.1, "carheight": 48.8, "curbweight": 2548, "enginetype": "dohc", "cylindernumber": "four", "enginesize": 130, "fuelsystem": "mpfi", "boreratio": 3.47, "stroke": 2.68, "...": "and 6 more columns" } { "car_ID": 2, "symboling": 3, "CarName": "alfa-romero stelvio", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "convertible", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 88.6, "carlength": 168.8, "carwidth": 64.1, "carheight": 48.8, "curbweight": 2548, "enginetype": "dohc", "cylindernumber": "four", "enginesize": 130, "fuelsystem": "mpfi", "boreratio": 3.47, 
"stroke": 2.68, "...": "and 6 more columns" } { "car_ID": 3, "symboling": 1, "CarName": "alfa-romero Quadrifoglio", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "hatchback", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 94.5, "carlength": 171.2, "carwidth": 65.5, "carheight": 52.4, "curbweight": 2823, "enginetype": "ohcv", "cylindernumber": "six", "enginesize": 152, "fuelsystem": "mpfi", "boreratio": 2.68, "stroke": 3.47, "...": "and 6 more columns" } { "car_ID": 4, "symboling": 2, "CarName": "audi 100 ls", "fueltype": "gas", "aspiration": "std", "doornumber": "four", "carbody": "sedan", "drivewheel": "fwd", "enginelocation": "front", "wheelbase": 99.8, "carlength": 176.6, "carwidth": 66.2, "carheight": 54.3, "curbweight": 2337, "enginetype": "ohc", "cylindernumber": "four", "enginesize": 109, "fuelsystem": "mpfi", "boreratio": 3.19, "stroke": 3.4, "...": "and 6 more columns" } <jupyter_script>import pandas as pd import numpy as np df = pd.read_csv("/kaggle/input/car-price-prediction/CarPrice_Assignment.csv") df.head() dfx = df.copy() # Creating a list for categorical and numerical column names cat = [] num = [] for n, d in dfx.items(): if d.dtype == "object": cat.append(n) else: num.append(n) dfx = df.copy() from sklearn.preprocessing import LabelEncoder, StandardScaler # Transform Categorical columns le = LabelEncoder() for i in cat: dfx[i] = le.fit_transform(dfx[i]) # Transform numerical columns ss = StandardScaler() for i in num: dfx[i] = ss.fit_transform(dfx[[i]]) import matplotlib.pyplot as plt import seaborn as sns corr = dfx.corr() matrix = np.triu(corr) plt.figure(figsize=(17, 7)) sns.heatmap(corr, annot=True, mask=matrix, fmt=".2f", cmap="inferno") abs(corr["price"]).sort_values(ascending=False) # Splitting features and labels X = df.drop(["price"], axis=1) y = df[["price"]] # Creating a list for categorical and numerical column names cat = [] num = [] for n, d in X.items(): if d.dtype == "object": cat.append(n) else: num.append(n) print(f"categorical columns : {cat}") print(f"numerical columns : {num}") # Transform Categorical columns le = LabelEncoder() for i in cat: X[i] = le.fit_transform(X[i]) # Transform numerical columns ss = StandardScaler() for i in num: X[i] = ss.fit_transform(X[[i]]) for i in cat: X[i] = ss.fit_transform(X[[i]]) X.head() import pprint from sklearn.model_selection import train_test_split, cross_validate from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error def model_eval(m, X, y): a, d, s, f = train_test_split(X, y) m.fit(a, s) g = m.predict(d) print(m) print(f"mae\t: {round(mean_absolute_error(f,g),2)}") print(f"rmse\t: {round(np.sqrt(mean_squared_error(f,g)),2)}") print(f"r2\t: {round(r2_score(f,g),4)}") def model_cv(m, X, y): scoring = ["neg_mean_absolute_error", "neg_root_mean_squared_error", "r2"] scores = cross_validate(m, X, y, scoring=scoring, cv=4, return_train_score=False) pprint.pprint(scores) # Making random null in enginesize column X1 = X.copy() X1["enginesize"].loc[np.random.randint(153, size=30)] = np.nan X1.isnull().sum() es_mean = X1["enginesize"].mean() X11 = X1.copy() X11 = X11.fillna(value=0) X12 = X1.copy() X12 = X12.fillna(value=es_mean) des = [ "without null values :", "null values replaced by 0", "null values replaced by mean", ] j = 0 for i in [X, X11, X12]: print(des[j]) j += 1 model_eval(lr, i, y) print("cv\t:") model_cv(lr, i, y) print()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/024/129024099.ipynb
car-price-prediction
hellbuoy
[{"Id": 129024099, "ScriptId": 38349649, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10465532, "CreationDate": "05/10/2023 11:43:52", "VersionNumber": 1.0, "Title": "car_price_prediction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 118.0, "LinesInsertedFromPrevious": 118.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184715103, "KernelVersionId": 129024099, "SourceDatasetVersionId": 741735}]
[{"Id": 741735, "DatasetId": 383055, "DatasourceVersionId": 762363, "CreatorUserId": 2318606, "LicenseName": "Unknown", "CreationDate": "10/15/2019 16:45:27", "VersionNumber": 1.0, "Title": "Car Price Prediction Multiple Linear Regression", "Slug": "car-price-prediction", "Subtitle": "Predicting the Prices of cars using RFE and VIF", "Description": "### Problem Statement\n\nA Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts. \n\n \n\nThey have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know:\n\nWhich variables are significant in predicting the price of a car\nHow well those variables describe the price of a car\nBased on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market. \n\n\n### Business Goal\n\nWe are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market. \n\n### Please Note : The dataset provided is for learning purpose. Please don\u2019t draw any inference with real world scenario.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 383055, "CreatorUserId": 2318606, "OwnerUserId": 2318606.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 741735.0, "CurrentDatasourceVersionId": 762363.0, "ForumId": 395004, "Type": 2, "CreationDate": "10/15/2019 16:45:27", "LastActivityDate": "10/15/2019", "TotalViews": 339360, "TotalDownloads": 50133, "TotalVotes": 491, "TotalKernels": 345}]
[{"Id": 2318606, "UserName": "hellbuoy", "DisplayName": "Manish Kumar", "RegisterDate": "10/03/2018", "PerformanceTier": 2}]
[{"car-price-prediction/CarPrice_Assignment.csv": {"column_names": "[\"car_ID\", \"symboling\", \"CarName\", \"fueltype\", \"aspiration\", \"doornumber\", \"carbody\", \"drivewheel\", \"enginelocation\", \"wheelbase\", \"carlength\", \"carwidth\", \"carheight\", \"curbweight\", \"enginetype\", \"cylindernumber\", \"enginesize\", \"fuelsystem\", \"boreratio\", \"stroke\", \"compressionratio\", \"horsepower\", \"peakrpm\", \"citympg\", \"highwaympg\", \"price\"]", "column_data_types": "{\"car_ID\": \"int64\", \"symboling\": \"int64\", \"CarName\": \"object\", \"fueltype\": \"object\", \"aspiration\": \"object\", \"doornumber\": \"object\", \"carbody\": \"object\", \"drivewheel\": \"object\", \"enginelocation\": \"object\", \"wheelbase\": \"float64\", \"carlength\": \"float64\", \"carwidth\": \"float64\", \"carheight\": \"float64\", \"curbweight\": \"int64\", \"enginetype\": \"object\", \"cylindernumber\": \"object\", \"enginesize\": \"int64\", \"fuelsystem\": \"object\", \"boreratio\": \"float64\", \"stroke\": \"float64\", \"compressionratio\": \"float64\", \"horsepower\": \"int64\", \"peakrpm\": \"int64\", \"citympg\": \"int64\", \"highwaympg\": \"int64\", \"price\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 205 entries, 0 to 204\nData columns (total 26 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 car_ID 205 non-null int64 \n 1 symboling 205 non-null int64 \n 2 CarName 205 non-null object \n 3 fueltype 205 non-null object \n 4 aspiration 205 non-null object \n 5 doornumber 205 non-null object \n 6 carbody 205 non-null object \n 7 drivewheel 205 non-null object \n 8 enginelocation 205 non-null object \n 9 wheelbase 205 non-null float64\n 10 carlength 205 non-null float64\n 11 carwidth 205 non-null float64\n 12 carheight 205 non-null float64\n 13 curbweight 205 non-null int64 \n 14 enginetype 205 non-null object \n 15 cylindernumber 205 non-null object \n 16 enginesize 205 non-null int64 \n 17 fuelsystem 205 non-null object \n 18 boreratio 205 non-null float64\n 19 stroke 205 non-null float64\n 20 compressionratio 205 non-null float64\n 21 horsepower 205 non-null int64 \n 22 peakrpm 205 non-null int64 \n 23 citympg 205 non-null int64 \n 24 highwaympg 205 non-null int64 \n 25 price 205 non-null float64\ndtypes: float64(8), int64(8), object(10)\nmemory usage: 41.8+ KB\n", "summary": "{\"car_ID\": {\"count\": 205.0, \"mean\": 103.0, \"std\": 59.32256456582661, \"min\": 1.0, \"25%\": 52.0, \"50%\": 103.0, \"75%\": 154.0, \"max\": 205.0}, \"symboling\": {\"count\": 205.0, \"mean\": 0.8341463414634146, \"std\": 1.2453068281055297, \"min\": -2.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"wheelbase\": {\"count\": 205.0, \"mean\": 98.75658536585367, \"std\": 6.021775685025571, \"min\": 86.6, \"25%\": 94.5, \"50%\": 97.0, \"75%\": 102.4, \"max\": 120.9}, \"carlength\": {\"count\": 205.0, \"mean\": 174.04926829268288, \"std\": 12.33728852655518, \"min\": 141.1, \"25%\": 166.3, \"50%\": 173.2, \"75%\": 183.1, \"max\": 208.1}, \"carwidth\": {\"count\": 205.0, \"mean\": 65.90780487804878, \"std\": 2.145203852687183, \"min\": 60.3, \"25%\": 64.1, \"50%\": 65.5, \"75%\": 66.9, \"max\": 72.3}, \"carheight\": {\"count\": 205.0, \"mean\": 53.72487804878049, \"std\": 2.4435219699049036, \"min\": 47.8, \"25%\": 52.0, \"50%\": 54.1, \"75%\": 55.5, \"max\": 59.8}, \"curbweight\": {\"count\": 205.0, \"mean\": 2555.5658536585365, \"std\": 520.6802035016387, \"min\": 1488.0, \"25%\": 2145.0, \"50%\": 2414.0, \"75%\": 2935.0, \"max\": 
4066.0}, \"enginesize\": {\"count\": 205.0, \"mean\": 126.90731707317073, \"std\": 41.64269343817984, \"min\": 61.0, \"25%\": 97.0, \"50%\": 120.0, \"75%\": 141.0, \"max\": 326.0}, \"boreratio\": {\"count\": 205.0, \"mean\": 3.329756097560975, \"std\": 0.27084370542622926, \"min\": 2.54, \"25%\": 3.15, \"50%\": 3.31, \"75%\": 3.58, \"max\": 3.94}, \"stroke\": {\"count\": 205.0, \"mean\": 3.255414634146341, \"std\": 0.31359701376080407, \"min\": 2.07, \"25%\": 3.11, \"50%\": 3.29, \"75%\": 3.41, \"max\": 4.17}, \"compressionratio\": {\"count\": 205.0, \"mean\": 10.142536585365855, \"std\": 3.972040321863298, \"min\": 7.0, \"25%\": 8.6, \"50%\": 9.0, \"75%\": 9.4, \"max\": 23.0}, \"horsepower\": {\"count\": 205.0, \"mean\": 104.1170731707317, \"std\": 39.54416680936116, \"min\": 48.0, \"25%\": 70.0, \"50%\": 95.0, \"75%\": 116.0, \"max\": 288.0}, \"peakrpm\": {\"count\": 205.0, \"mean\": 5125.121951219512, \"std\": 476.98564305694634, \"min\": 4150.0, \"25%\": 4800.0, \"50%\": 5200.0, \"75%\": 5500.0, \"max\": 6600.0}, \"citympg\": {\"count\": 205.0, \"mean\": 25.21951219512195, \"std\": 6.542141653001622, \"min\": 13.0, \"25%\": 19.0, \"50%\": 24.0, \"75%\": 30.0, \"max\": 49.0}, \"highwaympg\": {\"count\": 205.0, \"mean\": 30.75121951219512, \"std\": 6.886443130941824, \"min\": 16.0, \"25%\": 25.0, \"50%\": 30.0, \"75%\": 34.0, \"max\": 54.0}, \"price\": {\"count\": 205.0, \"mean\": 13276.710570731706, \"std\": 7988.85233174315, \"min\": 5118.0, \"25%\": 7788.0, \"50%\": 10295.0, \"75%\": 16503.0, \"max\": 45400.0}}", "examples": "{\"car_ID\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"symboling\":{\"0\":3,\"1\":3,\"2\":1,\"3\":2},\"CarName\":{\"0\":\"alfa-romero giulia\",\"1\":\"alfa-romero stelvio\",\"2\":\"alfa-romero Quadrifoglio\",\"3\":\"audi 100 ls\"},\"fueltype\":{\"0\":\"gas\",\"1\":\"gas\",\"2\":\"gas\",\"3\":\"gas\"},\"aspiration\":{\"0\":\"std\",\"1\":\"std\",\"2\":\"std\",\"3\":\"std\"},\"doornumber\":{\"0\":\"two\",\"1\":\"two\",\"2\":\"two\",\"3\":\"four\"},\"carbody\":{\"0\":\"convertible\",\"1\":\"convertible\",\"2\":\"hatchback\",\"3\":\"sedan\"},\"drivewheel\":{\"0\":\"rwd\",\"1\":\"rwd\",\"2\":\"rwd\",\"3\":\"fwd\"},\"enginelocation\":{\"0\":\"front\",\"1\":\"front\",\"2\":\"front\",\"3\":\"front\"},\"wheelbase\":{\"0\":88.6,\"1\":88.6,\"2\":94.5,\"3\":99.8},\"carlength\":{\"0\":168.8,\"1\":168.8,\"2\":171.2,\"3\":176.6},\"carwidth\":{\"0\":64.1,\"1\":64.1,\"2\":65.5,\"3\":66.2},\"carheight\":{\"0\":48.8,\"1\":48.8,\"2\":52.4,\"3\":54.3},\"curbweight\":{\"0\":2548,\"1\":2548,\"2\":2823,\"3\":2337},\"enginetype\":{\"0\":\"dohc\",\"1\":\"dohc\",\"2\":\"ohcv\",\"3\":\"ohc\"},\"cylindernumber\":{\"0\":\"four\",\"1\":\"four\",\"2\":\"six\",\"3\":\"four\"},\"enginesize\":{\"0\":130,\"1\":130,\"2\":152,\"3\":109},\"fuelsystem\":{\"0\":\"mpfi\",\"1\":\"mpfi\",\"2\":\"mpfi\",\"3\":\"mpfi\"},\"boreratio\":{\"0\":3.47,\"1\":3.47,\"2\":2.68,\"3\":3.19},\"stroke\":{\"0\":2.68,\"1\":2.68,\"2\":3.47,\"3\":3.4},\"compressionratio\":{\"0\":9.0,\"1\":9.0,\"2\":9.0,\"3\":10.0},\"horsepower\":{\"0\":111,\"1\":111,\"2\":154,\"3\":102},\"peakrpm\":{\"0\":5000,\"1\":5000,\"2\":5000,\"3\":5500},\"citympg\":{\"0\":21,\"1\":21,\"2\":19,\"3\":24},\"highwaympg\":{\"0\":27,\"1\":27,\"2\":26,\"3\":30},\"price\":{\"0\":13495.0,\"1\":16500.0,\"2\":16500.0,\"3\":13950.0}}"}}]
true
1
<start_data_description><data_path>car-price-prediction/CarPrice_Assignment.csv: <column_names> ['car_ID', 'symboling', 'CarName', 'fueltype', 'aspiration', 'doornumber', 'carbody', 'drivewheel', 'enginelocation', 'wheelbase', 'carlength', 'carwidth', 'carheight', 'curbweight', 'enginetype', 'cylindernumber', 'enginesize', 'fuelsystem', 'boreratio', 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg', 'price'] <column_types> {'car_ID': 'int64', 'symboling': 'int64', 'CarName': 'object', 'fueltype': 'object', 'aspiration': 'object', 'doornumber': 'object', 'carbody': 'object', 'drivewheel': 'object', 'enginelocation': 'object', 'wheelbase': 'float64', 'carlength': 'float64', 'carwidth': 'float64', 'carheight': 'float64', 'curbweight': 'int64', 'enginetype': 'object', 'cylindernumber': 'object', 'enginesize': 'int64', 'fuelsystem': 'object', 'boreratio': 'float64', 'stroke': 'float64', 'compressionratio': 'float64', 'horsepower': 'int64', 'peakrpm': 'int64', 'citympg': 'int64', 'highwaympg': 'int64', 'price': 'float64'} <dataframe_Summary> {'car_ID': {'count': 205.0, 'mean': 103.0, 'std': 59.32256456582661, 'min': 1.0, '25%': 52.0, '50%': 103.0, '75%': 154.0, 'max': 205.0}, 'symboling': {'count': 205.0, 'mean': 0.8341463414634146, 'std': 1.2453068281055297, 'min': -2.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'wheelbase': {'count': 205.0, 'mean': 98.75658536585367, 'std': 6.021775685025571, 'min': 86.6, '25%': 94.5, '50%': 97.0, '75%': 102.4, 'max': 120.9}, 'carlength': {'count': 205.0, 'mean': 174.04926829268288, 'std': 12.33728852655518, 'min': 141.1, '25%': 166.3, '50%': 173.2, '75%': 183.1, 'max': 208.1}, 'carwidth': {'count': 205.0, 'mean': 65.90780487804878, 'std': 2.145203852687183, 'min': 60.3, '25%': 64.1, '50%': 65.5, '75%': 66.9, 'max': 72.3}, 'carheight': {'count': 205.0, 'mean': 53.72487804878049, 'std': 2.4435219699049036, 'min': 47.8, '25%': 52.0, '50%': 54.1, '75%': 55.5, 'max': 59.8}, 'curbweight': {'count': 205.0, 'mean': 2555.5658536585365, 'std': 520.6802035016387, 'min': 1488.0, '25%': 2145.0, '50%': 2414.0, '75%': 2935.0, 'max': 4066.0}, 'enginesize': {'count': 205.0, 'mean': 126.90731707317073, 'std': 41.64269343817984, 'min': 61.0, '25%': 97.0, '50%': 120.0, '75%': 141.0, 'max': 326.0}, 'boreratio': {'count': 205.0, 'mean': 3.329756097560975, 'std': 0.27084370542622926, 'min': 2.54, '25%': 3.15, '50%': 3.31, '75%': 3.58, 'max': 3.94}, 'stroke': {'count': 205.0, 'mean': 3.255414634146341, 'std': 0.31359701376080407, 'min': 2.07, '25%': 3.11, '50%': 3.29, '75%': 3.41, 'max': 4.17}, 'compressionratio': {'count': 205.0, 'mean': 10.142536585365855, 'std': 3.972040321863298, 'min': 7.0, '25%': 8.6, '50%': 9.0, '75%': 9.4, 'max': 23.0}, 'horsepower': {'count': 205.0, 'mean': 104.1170731707317, 'std': 39.54416680936116, 'min': 48.0, '25%': 70.0, '50%': 95.0, '75%': 116.0, 'max': 288.0}, 'peakrpm': {'count': 205.0, 'mean': 5125.121951219512, 'std': 476.98564305694634, 'min': 4150.0, '25%': 4800.0, '50%': 5200.0, '75%': 5500.0, 'max': 6600.0}, 'citympg': {'count': 205.0, 'mean': 25.21951219512195, 'std': 6.542141653001622, 'min': 13.0, '25%': 19.0, '50%': 24.0, '75%': 30.0, 'max': 49.0}, 'highwaympg': {'count': 205.0, 'mean': 30.75121951219512, 'std': 6.886443130941824, 'min': 16.0, '25%': 25.0, '50%': 30.0, '75%': 34.0, 'max': 54.0}, 'price': {'count': 205.0, 'mean': 13276.710570731706, 'std': 7988.85233174315, 'min': 5118.0, '25%': 7788.0, '50%': 10295.0, '75%': 16503.0, 'max': 45400.0}} <dataframe_info> RangeIndex: 205 entries, 0 to 204 
Data columns (total 26 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 car_ID 205 non-null int64 1 symboling 205 non-null int64 2 CarName 205 non-null object 3 fueltype 205 non-null object 4 aspiration 205 non-null object 5 doornumber 205 non-null object 6 carbody 205 non-null object 7 drivewheel 205 non-null object 8 enginelocation 205 non-null object 9 wheelbase 205 non-null float64 10 carlength 205 non-null float64 11 carwidth 205 non-null float64 12 carheight 205 non-null float64 13 curbweight 205 non-null int64 14 enginetype 205 non-null object 15 cylindernumber 205 non-null object 16 enginesize 205 non-null int64 17 fuelsystem 205 non-null object 18 boreratio 205 non-null float64 19 stroke 205 non-null float64 20 compressionratio 205 non-null float64 21 horsepower 205 non-null int64 22 peakrpm 205 non-null int64 23 citympg 205 non-null int64 24 highwaympg 205 non-null int64 25 price 205 non-null float64 dtypes: float64(8), int64(8), object(10) memory usage: 41.8+ KB <some_examples> {'car_ID': {'0': 1, '1': 2, '2': 3, '3': 4}, 'symboling': {'0': 3, '1': 3, '2': 1, '3': 2}, 'CarName': {'0': 'alfa-romero giulia', '1': 'alfa-romero stelvio', '2': 'alfa-romero Quadrifoglio', '3': 'audi 100 ls'}, 'fueltype': {'0': 'gas', '1': 'gas', '2': 'gas', '3': 'gas'}, 'aspiration': {'0': 'std', '1': 'std', '2': 'std', '3': 'std'}, 'doornumber': {'0': 'two', '1': 'two', '2': 'two', '3': 'four'}, 'carbody': {'0': 'convertible', '1': 'convertible', '2': 'hatchback', '3': 'sedan'}, 'drivewheel': {'0': 'rwd', '1': 'rwd', '2': 'rwd', '3': 'fwd'}, 'enginelocation': {'0': 'front', '1': 'front', '2': 'front', '3': 'front'}, 'wheelbase': {'0': 88.6, '1': 88.6, '2': 94.5, '3': 99.8}, 'carlength': {'0': 168.8, '1': 168.8, '2': 171.2, '3': 176.6}, 'carwidth': {'0': 64.1, '1': 64.1, '2': 65.5, '3': 66.2}, 'carheight': {'0': 48.8, '1': 48.8, '2': 52.4, '3': 54.3}, 'curbweight': {'0': 2548, '1': 2548, '2': 2823, '3': 2337}, 'enginetype': {'0': 'dohc', '1': 'dohc', '2': 'ohcv', '3': 'ohc'}, 'cylindernumber': {'0': 'four', '1': 'four', '2': 'six', '3': 'four'}, 'enginesize': {'0': 130, '1': 130, '2': 152, '3': 109}, 'fuelsystem': {'0': 'mpfi', '1': 'mpfi', '2': 'mpfi', '3': 'mpfi'}, 'boreratio': {'0': 3.47, '1': 3.47, '2': 2.68, '3': 3.19}, 'stroke': {'0': 2.68, '1': 2.68, '2': 3.47, '3': 3.4}, 'compressionratio': {'0': 9.0, '1': 9.0, '2': 9.0, '3': 10.0}, 'horsepower': {'0': 111, '1': 111, '2': 154, '3': 102}, 'peakrpm': {'0': 5000, '1': 5000, '2': 5000, '3': 5500}, 'citympg': {'0': 21, '1': 21, '2': 19, '3': 24}, 'highwaympg': {'0': 27, '1': 27, '2': 26, '3': 30}, 'price': {'0': 13495.0, '1': 16500.0, '2': 16500.0, '3': 13950.0}} <end_description>
961
0
2,669
961
129839451
<jupyter_start><jupyter_text>Solar Power Generation Data This data has been gathered at two solar power plants in India over a 34 day period. It has two pairs of files - each pair has one power generation dataset and one sensor readings dataset. The power generation datasets are gathered at the inverter level - each inverter has multiple lines of solar panels attached to it. The sensor data is gathered at a plant level - single array of sensors optimally placed at the plant. There are a few areas of concern at the solar power plant - 1. Can we predict the power generation for next couple of days? - this allows for better grid management 2. Can we identify the need for panel cleaning/maintenance? 3. Can we identify faulty or suboptimally performing equipment? Kaggle dataset identifier: solar-power-generation-data <jupyter_script># # # Machine Learning and Time Series Forecasting # In the first part of this project [(part 1)](https://www.kaggle.com/code/sedimir/using-data-analytics-to-manage-a-solar-power-plant), we conducted an exploratory data analysis (EDA) on the power plant dataset. In the second part, our objective is to employ machine learning (ML) techniques to forecast power generation for the upcoming days. # There are several steps involved in time series forecasting with machine learning (ML), including: # 1. **data preparation**: cleaning and transforming the data # 2. **feature engineering**: selecting or creating relevant features to train the model # 3. **model selection**: choosing an appropriate algorithm or method to fit the time series data # 4. **evaluation**: assessing the performance of the model on test data # Some of ML models specifically designed to handle time series data are: # * Autoregressive Integrated Moving Average (**ARIMA**): ARIMA is a widely used model for time series forecasting that considers the autoregressive (AR), integrated (I), and moving average (MA) components of the data. # * Seasonal ARIMA (**SARIMA**): SARIMA extends the ARIMA model by incorporating seasonal components to account for seasonal patterns in time series data. # * Exponential Smoothing (**ES**): Exponential smoothing methods, such as Simple Exponential Smoothing (SES), Holt's Linear Exponential Smoothing (Holt-Winters), and Seasonal Exponential Smoothing (ETS), capture trends and seasonality in time series data. # * Recurrent Neural Networks (**RNNs**): RNNs, specifically Long Short-Term Memory (LSTM) networks, are powerful ML models capable of capturing long-term dependencies in time series data. # * **Prophet**: Prophet is a forecasting framework developed by Facebook that utilizes an additive model to capture seasonality, trends, and other components in time series data. # * Gaussian Processes (**GPs**): GPs are a probabilistic approach for time series modeling, capable of capturing uncertainty and providing flexible regression and forecasting capabilities. # * Convolutional Neural Networks (**CNNs**): CNNs, commonly used in computer vision tasks, can also be applied to time series data by treating them as images or using one-dimensional convolutions to capture temporal patterns. # * Support Vector Regression (**SVR**): SVR is a regression technique that can be applied to time series data, leveraging support vector machines to find the best fit curve. # * State Space Models (**SSMs**): SSMs represent time series data as a combination of latent states and observations. They provide a flexible framework for modeling complex dependencies in time series. 
# It's important to note that the choice of procedure depends on the specific characteristics of the time series data and the forecasting task at hand.
# ## 1. Exploratory Data Analysis (EDA)
# [Click to go to the first part](https://www.kaggle.com/code/sedimir/using-data-analytics-to-manage-a-solar-power-plant)
# ## 2. Machine Learning: Forecasting Power Generation
# In what follows, we will employ the ARIMA and Prophet procedures independently to predict DC power for the next couple of days and compare their respective results. The workflow will follow the steps outlined below:
# 2.1 Stationarity - Lag - Autocorrelation
# 2.2 Train and Test Datasets
# 2.3 ARIMA
# 2.4 Prophet
# 2.5 Evaluation
# 2.6 Conclusion
# #
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import matplotlib.dates as mdates

## reading generation dataset
custom_date_parser = lambda x: datetime.strptime(x, "%d-%m-%Y %H:%M")
gen = pd.read_csv(
    "/kaggle/input/solar-power-generation-data/Plant_1_Generation_Data.csv",
    index_col="DATE_TIME",
    parse_dates=["DATE_TIME"],
    date_parser=custom_date_parser,
)
gen.head(2)
# #
# For the sake of simplicity we are going to limit our data to June 11th to June 15th (matching the slice below). Using this data we are going to predict generation for June 16th and 17th and compare it with the actual generation on these two days.
# en_pred=gen['2020-06-11 00:00:00':'2020-06-15 23:45:00'].drop('SOURCE_KEY',axis=1).DC_POWER.groupby('DATE_TIME').sum().reset_index().set_index('DATE_TIME')
# en_real=gen['2020-06-16 00:00:00':'2020-06-17 23:45:00'].drop('SOURCE_KEY',axis=1).DC_POWER.groupby('DATE_TIME').sum().reset_index().set_index('DATE_TIME')
gen_pred = (
    gen["2020-06-11 00:00:00":"2020-06-15 23:45:00"]
    .drop("SOURCE_KEY", axis=1)
    .groupby("DATE_TIME")
    .sum()
)
gen_real = (
    gen["2020-06-16 00:00:00":"2020-06-17 23:45:00"]
    .drop("SOURCE_KEY", axis=1)
    .groupby("DATE_TIME")
    .sum()
)
plt.plot(gen_pred.DC_POWER, label="Data to be used for prediction")
plt.plot(gen_real.DC_POWER, label="Data to be used to evaluate the prediction")
plt.xticks(rotation=45)
plt.title("DC POWER GENERATION")
plt.legend()
plt.show()
# ### 2.1 Stationarity - Lag - Autocorrelation
# #### **Stationarity**
# For forecasting purposes, it is desirable for a time series to exhibit stationarity. In a stationary time series, the mean, variance, and covariance remain constant over time. Stationarity is important because it allows us to make reliable predictions based on the assumption that the future behavior of the time series will be similar to its past behavior.
# When a time series is non-stationary, it may exhibit trends, seasonality, or other patterns that can make forecasting challenging. In such cases, it becomes necessary to transform the time series to achieve stationarity before applying forecasting models.
# Common techniques for achieving stationarity include taking first differences to remove trends or applying transformations such as the logarithm to stabilize the variance. Once stationarity is achieved, forecasting models can be effectively applied to make accurate predictions.
# By ensuring stationarity in a time series, we can rely on the assumption that the statistical properties of the series will remain consistent, enabling us to generate reliable forecasts for future observations.
# There are several statistical tests available to determine the stationarity of a time series. Here we are going to use the Augmented Dickey-Fuller (ADF) test.
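# Before running the test, a quick aside (an addition, not used further below): the
# differencing and log transforms mentioned above are one-liners in pandas.
diffed = gen_pred.DC_POWER.diff().dropna()  # first differences remove a trend
logged = np.log1p(gen_pred.DC_POWER)  # log1p stabilizes variance and tolerates the zero night-time readings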
The ADF test evaluates the null hypothesis that a unit root is present in the time series (indicating non-stationarity). A unit root implies that the autoregressive coefficient on the lagged values of the series is equal to 1. This means that the effect of past values on the current value of the time series does not decay over time. If the p-value obtained from the ADF test is below a chosen significance level (e.g., 0.05), we can reject the null hypothesis and conclude that the series is stationary.
from statsmodels.tsa.stattools import adfuller

result = adfuller(gen_pred.DC_POWER)
adf_statistic = result[0]
p_value = result[1]
critical_values = result[4]
print("ADF Statistic:", adf_statistic)
print("p-value:", p_value)
print("Critical Values:")
for key, value in critical_values.items():
    print(f"{key}: {value}")
# The *ADF statistic* quantifies the strength of evidence against non-stationarity (large negative values indicate strong evidence that the time series is stationary), while the *p-value* provides the probability associated with that evidence. The critical values indicate the thresholds, at different confidence levels, below which the null hypothesis (non-stationarity) would be rejected.
# In our case, the *ADF statistic* is significantly negative, indicating strong evidence against the presence of a unit root (non-stationarity). Additionally, the *p-value* is very small (less than the commonly used significance level of 0.05), further supporting the rejection of the null hypothesis of a unit root and indicating stationarity.
# Let's investigate the stationarity further using lag and autocorrelation.
# **Lag and Autocorrelation**
# For a time series, lag refers to the time difference between observations in the series. It represents how far back in time you are looking when examining the relationship between observations.
# Autocorrelation measures the linear relationship between an observation and its lagged values. It quantifies the degree of similarity or dependence between observations at different time points. Positive autocorrelation indicates that an observation tends to be similar to its lagged values, while negative autocorrelation indicates that it tends to be dissimilar.
# In a stationary time series, the autocorrelation between observations typically decreases as the lag increases. This means that as you look further back in time (increase the lag), the dependence between observations tends to diminish.
# Figures 1 and 2 below clearly illustrate a positive correlation between consecutive observations in the time series, indicating that each observation is positively correlated with its preceding observation (Figure 1). Additionally, as the lag increases, the autocorrelations tend to diminish, which aligns with the characteristics of a stationary time series.
#
f, ax = plt.subplots(2, 1)
plt.tight_layout(pad=4)
# plot the full prediction window here; the train/test split happens in the next section
pd.plotting.lag_plot(gen_pred.DC_POWER, ax=ax[0])
pd.plotting.autocorrelation_plot(gen_pred.DC_POWER, ax=ax[1])
ax[0].set_title(
    "Figure 1 : lag between two observations in the same timeseries", fontsize=10
)
ax[1].set_title("Figure 2: autocorrelation as a function of lag", fontsize=10)
plt.show()
# ### 2.2 Train and Test Datasets
#
from sklearn.model_selection import train_test_split

# keep the split as DataFrames named train/test, since the ARIMA cells below use them
train, test = train_test_split(gen_pred, test_size=0.2, shuffle=False)
plt.plot(train.index, train.DC_POWER, label="Train")
plt.plot(test.index, test.DC_POWER, label="Test")
plt.legend()
plt.show()
# ### 2.3 Autoregressive Integrated Moving Average (ARIMA)
# Determining the optimal values for the parameters p, d, and q in an ARIMA model is an important step in time series analysis. These parameters represent the autoregressive (p), differencing (d), and moving average (q) components of the model, respectively.
# The selection of these parameters depends on the characteristics of your time series data. Our time series is stationary, therefore d=0 in the ARIMA model (for a non-stationary time series: d=1). The autoregressive component (p) captures the relationship between the current observation and a specified number of lagged observations. The moving average component (q) considers the dependency between the current observation and a specified number of lagged forecast errors.
# To tune the parameters of an ARIMA model for a stationary time series, you can use the auto_arima function available in the pmdarima library in Python. This function automates the process of selecting the optimal p, d, and q values for the ARIMA model using a stepwise approach.
#
# pip install pmdarima
import pmdarima as pm

model = pm.auto_arima(train.DC_POWER, seasonal=False, trace=True)
model.summary()
model.fit(train.DC_POWER)
# wrap the raw forecast in a Series indexed by the test timestamps so it plots on
# the same x-axis as the actuals
predictions = pd.Series(
    np.asarray(model.predict(n_periods=len(test))), index=test.index
)
plt.plot(test.DC_POWER, label="Real Generation")
plt.plot(predictions, label="Predicted Generation")
plt.xticks(rotation=45)
plt.title("DC POWER GENERATION")
plt.legend()
plt.show()
from statsmodels.tsa.arima.model import ARIMA

model1 = ARIMA(train.DC_POWER, order=(5, 0, 5))
model_fit = model1.fit()
model_fit.summary()
# forecast by position, because the 15-minute index carries no explicit frequency,
# then re-attach the test timestamps for plotting
predic = model_fit.predict(
    start=len(train), end=len(train) + len(test) - 1, dynamic=False
)
predic.index = test.index
plt.plot(test.DC_POWER, label="Real Generation")
plt.plot(predic, label="Predicted Generation")
plt.xticks(rotation=45)
plt.title("DC POWER GENERATION")
plt.legend()
plt.show()
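# ### 2.4 Prophet
# The outline above promises a Prophet comparison that does not appear in the notebook;
# what follows is a minimal hedged sketch of that step, an addition rather than the
# author's code. It assumes the `prophet` package; `ds` and `y` are the column names
# Prophet requires, and "15min" matches this dataset's sampling interval. Note that if
# the data has gaps, the generated future timestamps may not line up exactly with the
# test index, so the RMSE below is only a rough comparison.
from prophet import Prophet
from sklearn.metrics import mean_squared_error

# Prophet expects a two-column frame: timestamps in `ds`, values in `y`
prophet_train = train.reset_index().rename(
    columns={"DATE_TIME": "ds", "DC_POWER": "y"}
)[["ds", "y"]]
m = Prophet()
m.fit(prophet_train)
future = m.make_future_dataframe(periods=len(test), freq="15min")
forecast = m.predict(future)
prophet_pred = forecast.set_index("ds")["yhat"].tail(len(test))
plt.plot(test.DC_POWER, label="Real Generation")
plt.plot(prophet_pred, label="Prophet Forecast")
plt.xticks(rotation=45)
plt.title("DC POWER GENERATION")
plt.legend()
plt.show()
# a rough RMSE comparison of the two approaches on the held-out window
print("ARIMA RMSE  :", np.sqrt(mean_squared_error(test.DC_POWER, predic)))
print("Prophet RMSE:", np.sqrt(mean_squared_error(test.DC_POWER, prophet_pred.values)))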
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/839/129839451.ipynb
solar-power-generation-data
anikannal
[{"Id": 129839451, "ScriptId": 38605109, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11890312, "CreationDate": "05/16/2023 21:35:54", "VersionNumber": 2.0, "Title": "Machine Learning: Forecasting Power Generation", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 192.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186222786, "KernelVersionId": 129839451, "SourceDatasetVersionId": 1428586}]
[{"Id": 1428586, "DatasetId": 836676, "DatasourceVersionId": 1461947, "CreatorUserId": 8503, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "08/18/2020 15:52:03", "VersionNumber": 1.0, "Title": "Solar Power Generation Data", "Slug": "solar-power-generation-data", "Subtitle": "Solar power generation and sensor data for two power plants.", "Description": "This data has been gathered at two solar power plants in India over a 34 day period. It has two pairs of files - each pair has one power generation dataset and one sensor readings dataset. The power generation datasets are gathered at the inverter level - each inverter has multiple lines of solar panels attached to it. The sensor data is gathered at a plant level - single array of sensors optimally placed at the plant.\n\nThere are a few areas of concern at the solar power plant - \n1. Can we predict the power generation for next couple of days? - this allows for better grid management\n2. Can we identify the need for panel cleaning/maintenance?\n3. Can we identify faulty or suboptimally performing equipment?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 836676, "CreatorUserId": 8503, "OwnerUserId": 8503.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1428586.0, "CurrentDatasourceVersionId": 1461947.0, "ForumId": 851864, "Type": 2, "CreationDate": "08/18/2020 15:52:03", "LastActivityDate": "08/18/2020", "TotalViews": 474439, "TotalDownloads": 34660, "TotalVotes": 854, "TotalKernels": 239}]
[{"Id": 8503, "UserName": "anikannal", "DisplayName": "Ani Kannal", "RegisterDate": "04/05/2011", "PerformanceTier": 0}]
# # # Machine Learning and Time Series Forecasting # In the first part of this project [(part 1)](https://www.kaggle.com/code/sedimir/using-data-analytics-to-manage-a-solar-power-plant), we conducted an exploratory data analysis (EDA) on the power plant dataset. In the second part, our objective is to employ machine learning (ML) techniques to forecast power generation for the upcoming days. # There are several steps involved in time series forecasting with machine learning (ML), including: # 1. **data preparation**: cleaning and transforming the data # 2. **feature engineering**: selecting or creating relevant features to train the model # 3. **model selection**: choosing an appropriate algorithm or method to fit the time series data # 4. **evaluation**: assessing the performance of the model on test data # Some of ML models specifically designed to handle time series data are: # * Autoregressive Integrated Moving Average (**ARIMA**): ARIMA is a widely used model for time series forecasting that considers the autoregressive (AR), integrated (I), and moving average (MA) components of the data. # * Seasonal ARIMA (**SARIMA**): SARIMA extends the ARIMA model by incorporating seasonal components to account for seasonal patterns in time series data. # * Exponential Smoothing (**ES**): Exponential smoothing methods, such as Simple Exponential Smoothing (SES), Holt's Linear Exponential Smoothing (Holt-Winters), and Seasonal Exponential Smoothing (ETS), capture trends and seasonality in time series data. # * Recurrent Neural Networks (**RNNs**): RNNs, specifically Long Short-Term Memory (LSTM) networks, are powerful ML models capable of capturing long-term dependencies in time series data. # * **Prophet**: Prophet is a forecasting framework developed by Facebook that utilizes an additive model to capture seasonality, trends, and other components in time series data. # * Gaussian Processes (**GPs**): GPs are a probabilistic approach for time series modeling, capable of capturing uncertainty and providing flexible regression and forecasting capabilities. # * Convolutional Neural Networks (**CNNs**): CNNs, commonly used in computer vision tasks, can also be applied to time series data by treating them as images or using one-dimensional convolutions to capture temporal patterns. # * Support Vector Regression (**SVR**): SVR is a regression technique that can be applied to time series data, leveraging support vector machines to find the best fit curve. # * State Space Models (**SSMs**): SSMs represent time series data as a combination of latent states and observations. They provide a flexible framework for modeling complex dependencies in time series. # It's important to note that the choice of procedure depends on the specific characteristics of the time series data and the forecasting task at hand. # ## 1. Exploratoy Data Analysis (EDA) # [Click to go the first part](https://www.kaggle.com/code/sedimir/using-data-analytics-to-manage-a-solar-power-plant) # ## 2. Machine Learning: Forecasting Power Generation # In what follows, we will employ ARIMA and Prophet procedures independently to predict DC power in the next couple of days and compare their respective results. The workflow will follow the steps outlined below: # 2. 1 Stationarity - Lag - Autocorrleation # 2. 2 Train and Test datasets # 2. 3 ARIMA # 2. 4 Prophet # 2. 5 Evaluation # 2. 
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime import matplotlib.dates as mdates ## reading generation dataset custom_date_parser = lambda x: datetime.strptime(x, "%d-%m-%Y %H:%M") gen = pd.read_csv( "/kaggle/input/solar-power-generation-data/Plant_1_Generation_Data.csv", index_col="DATE_TIME", parse_dates=["DATE_TIME"], date_parser=custom_date_parser, ) gen.head(2) # # For the sake of simplicity, we are going to limit our data to June 11th through June 15th. Using this data we are going to predict the generation for June 16th and 17th and compare it with the actual generation on those two days. # en_pred=gen['2020-06-11 00:00:00':'2020-06-15 23:45:00'].drop('SOURCE_KEY',axis=1).DC_POWER.groupby('DATE_TIME').sum().reset_index().set_index('DATE_TIME') # en_real=gen['2020-06-16 00:00:00':'2020-06-17 23:45:00'].drop('SOURCE_KEY',axis=1).DC_POWER.groupby('DATE_TIME').sum().reset_index().set_index('DATE_TIME') gen_pred = ( gen["2020-06-11 00:00:00":"2020-06-15 23:45:00"] .drop("SOURCE_KEY", axis=1) .groupby("DATE_TIME") .sum() ) gen_real = ( gen["2020-06-16 00:00:00":"2020-06-17 23:45:00"] .drop("SOURCE_KEY", axis=1) .groupby("DATE_TIME") .sum() ) plt.plot(gen_pred.DC_POWER, label="Data to be used for prediction") plt.plot(gen_real.DC_POWER, label="Data to be used to evaluate the prediction") plt.xticks(rotation=45) plt.title("DC POWER GENERATION") plt.legend() plt.show() # ### 2.1 Stationarity - Lag - Autocorrelation # #### **Stationarity** # For forecasting purposes, it is desirable for a time series to exhibit stationarity. In a stationary time series, the mean, variance, and covariance remain constant over time. Stationarity is important because it allows us to make reliable predictions based on the assumption that the future behavior of the time series will be similar to its past behavior. # When a time series is non-stationary, it may exhibit trends, seasonality, or other patterns that can make forecasting challenging. In such cases, it becomes necessary to transform the time series to achieve stationarity before applying forecasting models. # Common techniques for achieving stationarity include taking first differences to remove trends or applying transformations such as a logarithm to stabilize the variance. Once stationarity is achieved, forecasting models can be effectively applied to make accurate predictions. # By ensuring stationarity in a time series, we can rely on the assumption that the statistical properties of the series will remain consistent, enabling us to generate reliable forecasts for future observations. # There are several statistical tests available to determine the stationarity of a time series. Here we are going to use the Augmented Dickey-Fuller (ADF) test. The ADF test checks the null hypothesis that a unit root is present in the time series (indicating non-stationarity). A unit root implies that the autoregressive coefficient on the lagged values of the series is equal to 1. This means that the effect of past values on the current value of the time series does not decay over time. If the p-value obtained from the ADF test is below a chosen significance level (e.g., 0.05), we can reject the null hypothesis and conclude that the series is stationary.
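# A quick hedged sketch (not part of the original notebook) of the first-differencing technique mentioned above; it would only be needed if the ADF test below indicated non-stationarity:
gen_diff = gen_pred.DC_POWER.diff().dropna()  # first differences remove a linear trend
gen_diff.plot(title="First-differenced DC power")
plt.show()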
from statsmodels.tsa.stattools import adfuller result = adfuller(gen_pred.DC_POWER) adf_statistic = result[0] p_value = result[1] critical_values = result[4] print("ADF Statistic:", adf_statistic) print("p-value:", p_value) print("Critical Values:") for key, value in critical_values.items(): print(f"{key}: {value}") # The *ADF statistic* quantifies the strength of evidence against non-stationarity (strongly negative values are strong evidence that the time series is probably stationary), while the *p-value* provides the probability associated with that evidence. The critical values indicate the confidence levels at which the null hypothesis (non-stationarity) would be rejected. # In our case, the *ADF statistic* value is significantly negative, indicating strong evidence against the presence of a unit root (non-stationarity). Additionally, the *p-value* is very small (less than the commonly used significance level of 0.05), further supporting the rejection of the null hypothesis of a unit root and indicating the presence of stationarity. # Let's investigate the stationarity further using lag and autocorrelation. # #### **Lag and Autocorrelation** # For a time series, lag refers to the time difference between observations in the series. It represents how far back in time you are looking when examining the relationship between observations. # Autocorrelation measures the linear relationship between an observation and its lagged values. It quantifies the degree of similarity or dependence between observations at different time points. Positive autocorrelation indicates that observations at a given lag tend to be similar, while negative autocorrelation indicates that they tend to be dissimilar. # In a stationary time series, the autocorrelation between observations typically decreases as the lag increases. This means that as you look further back in time (increase the lag), the dependence between observations tends to diminish. # Figures 1 and 2 below clearly illustrate a positive correlation between two observations in the time series, indicating that each observation is positively correlated with its preceding observation (Figure 1). Additionally, as the lag increases, the autocorrelation tends to diminish, which aligns with the characteristics of a stationary time series. # f, ax = plt.subplots(2, 1) plt.tight_layout(pad=4) pd.plotting.lag_plot(gen_pred.DC_POWER, ax=ax[0]) pd.plotting.autocorrelation_plot(gen_pred.DC_POWER, ax=ax[1]) ax[0].set_title( "Figure 1 : lag between two observations in the same timeseries", fontsize=10 ) ax[1].set_title("Figure 2: autocorrelation as a function of lag", fontsize=10) plt.show() # ### 2.2 Train and Test Datasets # from sklearn.model_selection import train_test_split # Split the series itself, keeping chronological order (shuffle=False), so that # `train` and `test` are DataFrames usable by the ARIMA code below. train, test = train_test_split( gen_pred, test_size=0.2, shuffle=False ) plt.plot(train.index, train.DC_POWER, label="Train") plt.plot(test.index, test.DC_POWER, label="Test") plt.legend() plt.show() # ### 2.3 Autoregressive Integrated Moving Average (ARIMA) # Determining the optimal values for the parameters p, d, and q in an ARIMA model is an important step in time series analysis. These parameters represent the autoregressive (p), differencing (d), and moving average (q) components of the model, respectively. # The selection of these parameters depends on the characteristics of your time series data. Our time series is stationary, therefore d=0 in the ARIMA model (for a non-stationary time series, d=1 or higher).
The autoregressive component (p) captures the relationship between the current observation and a specified number of lagged observations. The moving average component (q) considers the dependency between the current observation and a specified number of lagged forecast errors. # To tune the parameters of an ARIMA model for a stationary time series, you can use the auto_arima function available in the pmdarima library in Python. This function automates the process of selecting the optimal p, d, and q values for the ARIMA model using a stepwise approach. # # pip install pmdarima import pmdarima as pm model = pm.auto_arima(train.DC_POWER, seasonal=False, trace=True) model.summary() model.fit(train.DC_POWER) # Align the forecast with the test index so both series share the same time axis. predictions = pd.Series(model.predict(n_periods=len(test)), index=test.index) plt.plot(test.DC_POWER, label="Real Generation") plt.plot(predictions, label="Predicted Generation") plt.xticks(rotation=45) plt.title("DC POWER GENERATION") plt.legend() plt.show() from statsmodels.tsa.arima.model import ARIMA model1 = ARIMA(train.DC_POWER, order=(5, 0, 5)) model_fit = model1.fit() model_fit.summary() predic = model_fit.predict(start=test.index[0], end=test.index[-1], dynamic=False) plt.plot(test.DC_POWER, label="Real Generation") plt.plot(predic, label="Predicted Generation") plt.xticks(rotation=45) plt.title("DC POWER GENERATION") plt.legend() plt.show()
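# ### 2.4 Prophet (hedged sketch) # The excerpt ends before the Prophet step listed in the workflow above, so the following is only a minimal sketch of how it might look, assuming the `prophet` package is installed; with the 15-minute sampling of this data, 192 periods cover the next two days.
from prophet import Prophet

# Prophet requires a DataFrame with columns named `ds` (timestamps) and `y` (values).
df_prophet = gen_pred.reset_index()[["DATE_TIME", "DC_POWER"]].rename(
    columns={"DATE_TIME": "ds", "DC_POWER": "y"}
)
m = Prophet()  # additive model capturing trend and seasonality
m.fit(df_prophet)
future = m.make_future_dataframe(periods=192, freq="15min")  # forecast 2 days ahead
forecast = m.predict(future)
m.plot(forecast)
plt.show()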
false
1
3,118
0
3,302
3,118
129839561
import pandas as pd import seaborn as sns from matplotlib import pyplot as plt import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import ( RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, ) from sklearn.metrics import * from sklearn.model_selection import * train = pd.read_json("/kaggle/input/whats-cooking-kernels-only/train.json") test = pd.read_json("/kaggle/input/whats-cooking-kernels-only/test.json") # **EDA: Exploratory Data Analysis** train.count() # We have a lot of data... (good news!) (train.isnull().sum() / len(train)) * 100 # ...and no null values (more good news!) train.head() # We only have 3 columns: the recipe id (id), its cuisine style (cuisine), and an array containing its ingredients (ingredients). # We will reshape the ingredients column and encode each distinct ingredient as its own column (0 or 1). # # Let's start by collecting every ingredient into a list: len(train) train = train.sample(n=10000) # We plot the proportion of each cuisine style (code adapted from this notebook: https://www.kaggle.com/code/anmoltripathi/what-s-cooking-top-7-solution) fig, ax = plt.subplots(figsize=(10, 10)) per_vals = round(train["cuisine"].value_counts(normalize=True) * 100, 2) for i, v in enumerate(per_vals): ax.text(v + 3, i + 0.25, str(v) + "%", color="blue", fontweight="bold") train["cuisine"].value_counts().plot.barh(ax=ax) plt.show() # Italian and Mexican recipes slightly dominate the dataset. # Let's visualize the 10 most used ingredients per cuisine style (code adapted from this notebook: https://www.kaggle.com/code/anmoltripathi/what-s-cooking-top-7-solution): cuisine = train["cuisine"].unique() all_cus = dict() for cs in cuisine: i = [] for ing_list in train[train["cuisine"] == cs]["ingredients"]: for ing in ing_list: i.append(ing) all_cus[cs] = i for key in all_cus.keys(): fig, ax = plt.subplots(figsize=(10, 2)) pd.Series(all_cus[key]).value_counts().head(10).plot.bar(ax=ax, title=key) plt.show() # Salt is everywhere, so it will be a very poor indicator. However, some ingredients are telling: for example, if soy sauce is present, we can be fairly sure the recipe is Chinese, Japanese, or Korean. train list_ingredients = [] for i in range(len(train)): for ingredient in train["ingredients"].iloc[i]: if ingredient not in list_ingredients: list_ingredients.append(ingredient) list_ingredients # Now that this is done, we will add a column to our DataFrame for each ingredient in the list (their value will be binary: 0 if the ingredient is not in the recipe, 1 if it is). # We initialize all these columns to 0 (small caveat: this is not optimized, hence the warning, and execution takes a long time) for ingredient in list_ingredients: train[ingredient] = np.zeros(len(train)) train.head() # Our columns have been created. We can now set the fields to 1 for the rows whose recipe contains the ingredient in question (using .loc so the assignment reliably writes through): for i in range(len(train)): ingredients = train["ingredients"].iloc[i] for j in ingredients: train.loc[train.index[i], j] = 1 train.head() # There we go! Our dataset is ready to be used.
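# A hedged alternative sketch (not the author's approach): sklearn's MultiLabelBinarizer builds the same binary ingredient matrix in one vectorized step, avoiding the slow loops above.
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
ingredient_matrix = pd.DataFrame(
    mlb.fit_transform(train["ingredients"]),  # one 0/1 column per distinct ingredient
    columns=mlb.classes_,
    index=train.index,
)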
X = train.drop(["id", "cuisine", "ingredients"], axis=1) y = train["cuisine"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2 ) # X_train and y_train for training # X_test and y_test for evaluating the predictions # Let's start gently with a logistic regression: from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(solver="liblinear") classifier.fit(X_train, y_train) y_hat = classifier.predict(X_test) accuracy_score(y_test, y_hat)
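# A hedged extension sketch: the tree-based classifiers imported at the top of this notebook are never used in the excerpt; comparing a couple of them on the same split could look like this.
for name, clf in [
    ("DecisionTree", DecisionTreeClassifier()),
    ("RandomForest", RandomForestClassifier(n_estimators=100)),
]:
    clf.fit(X_train, y_train)
    print(name, accuracy_score(y_test, clf.predict(X_test)))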
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/839/129839561.ipynb
null
null
[{"Id": 129839561, "ScriptId": 38149654, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13901516, "CreationDate": "05/16/2023 21:37:24", "VersionNumber": 1.0, "Title": "What's Cooking - FC LE4 2023", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 117.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,309
0
1,309
1,309
129839935
<jupyter_start><jupyter_text>Oxford-IIIT-Pet-from-XijiaTao Kaggle dataset identifier: oxfordiiitpetfromxijiatao <jupyter_script># # Starter notebook for Oxford IIIT Pets using Torchvision and Albumentations # This is a starter notebook that shows how to use the Oxford IIIT Pet dataset with Torchvision in Kaggle notebooks w/o downloading it. We create a symlink in the working directory in the format that torchvision expects to make this work. # import torch import torchvision import os from matplotlib import pyplot from PIL import Image import numpy as np # Convert a PIL image to a PyTorch float tensor img2t = torchvision.transforms.ToTensor() # Convert a PyTorch float tensor to a PIL image t2img = torchvision.transforms.ToPILImage() # ## Torchvision dataset # Create symlinks and point `torchvision.datasets.OxfordIIITPet` to this location. # Oxford IIIT Pets Segmentation dataset loaded via torchvision. oxford_pets_path = "/kaggle/working" pets_train_orig = torchvision.datasets.OxfordIIITPet( root=oxford_pets_path, split="trainval", target_types="segmentation", download=False ) pets_test_orig = torchvision.datasets.OxfordIIITPet( root=oxford_pets_path, split="test", target_types="segmentation", download=False ) pets_train_orig, pets_test_orig # ## Display images and segmentation masks def display_images_and_masks(dataset, indexes): """Display images and segmentation masks next to each other.""" # Display a maximum of 2 sets of (image, mask) pairs per row. nrows = (len(indexes) + 1) // 2 # 3 units height per row. fig = pyplot.figure(figsize=(10, 3 * nrows)) for i in range(len(indexes)): # Look up the dataset element for the requested index. image, mask = dataset[indexes[i]][0], dataset[indexes[i]][1] fig.add_subplot(nrows, 4, i * 2 + 1) pyplot.imshow(image) pyplot.axis("off") fig.add_subplot(nrows, 4, i * 2 + 2) pyplot.imshow(mask) pyplot.axis("off") # end for # end def # Display 4 training and test images. display_images_and_masks(pets_train_orig, indexes=(0, 1, 2, 3)) pyplot.show() display_images_and_masks(pets_test_orig, indexes=(0, 1, 2, 3)) pyplot.show() # ## Resize images to 128x128 # Resize the images to 128x128 using torchvision transforms, and display the results. resizer = torchvision.transforms.Resize((128, 128)) image = pets_train_orig[0][0] image128 = resizer(image) print(image.size, image128.size) fig = pyplot.figure(figsize=(10, 4)) pyplot.subplot(1, 2, 1) pyplot.imshow(image) pyplot.axis("off") pyplot.subplot(1, 2, 2) pyplot.imshow(image128) pyplot.axis("off") # ## Apply multiple color transforms using torchvision # And display the results. def display_images_with_augmentations(image, transforms, labels): # Display a maximum of 4 images per row. nrows = (len(transforms) + 1 + 3) // 4 # 3 units height per row. fig = pyplot.figure(figsize=(10, 3 * nrows)) # The first time an image is displayed, don't transform it.
transforms = [lambda x: x] + transforms labels = ["Original"] + labels for i in range(len(transforms)): timage = transforms[i](image) args = {} if timage.mode == "L": args["cmap"] = "gray" fig.add_subplot(nrows, 4, i + 1) pyplot.title(labels[i]) pyplot.imshow(timage, **args) pyplot.axis("off") # end for # end def class ChannelShuffle: def __init__(self, permute): super().__init__() self.permute = list(permute) def __call__(self, x): if isinstance(x, Image.Image): t = img2t(x) back = t2img else: t = x back = lambda x: x tnew = t[self.permute] return back(tnew) image = torchvision.transforms.Resize((128, 128))(pets_train_orig[2][0]) transforms = [ torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5), torchvision.transforms.Grayscale(num_output_channels=1), torchvision.transforms.GaussianBlur(kernel_size=5), torchvision.transforms.RandomPosterize(bits=3, p=1.0), torchvision.transforms.RandomSolarize(threshold=0.3, p=1.0), torchvision.transforms.RandomInvert(p=1.0), ChannelShuffle((1, 2, 0)), torchvision.transforms.RandomHorizontalFlip(p=1.0), torchvision.transforms.RandomEqualize(p=1.0), torchvision.transforms.RandomAutocontrast(p=1.0), torchvision.transforms.RandomAdjustSharpness(sharpness_factor=2.0, p=1.0), ] labels = [ "Color Jitter", "Grayscale", "Gaussian Blur", "Posterize", "Solarize", "Invert", "Channel Shuffle", "Horizontal Flip", "Equalize", "Autocontrast", "Sharpness", ] display_images_with_augmentations(image, transforms, labels) # ## Display images with segmentation masks overlaid using torchvision # Torchvision provides utilities to visualize images with overlaid segmentation masks. This section shows how to use these utilities. def image_with_segmask(image, mask): # image and mask are PIL images. First convert them to a uint8 np.array # and then into a pytorch tensor. The dimensions are ordered as HWC, so # we permute them to be CHW since draw_segmentation_masks() accepts CHW # image tensors. imaget = torch.tensor(np.array(image)).permute(2, 0, 1) maskt = torch.tensor(np.array(mask)).unsqueeze(0) # Convert maskt into a boolean tensor since that is what # draw_segmentation_masks() accepts. 2 is the background pixel, so we # remove it and keep the rest as the segmentation mask. maskt = maskt != 2 # Return a tensor with the segmentation mask superimposed in purple # with 0.4 opacity. return torchvision.utils.draw_segmentation_masks( image=imaget, masks=maskt, alpha=0.4, colors="purple" ) def grid_image_with_segmask(dataset, indexes): # We resize the image and segmentation mask to 128x128 # before plotting them. resizer = torchvision.transforms.Resize((128, 128)) # Plot at most 4 images per row. nrows = (len(indexes) + 3) // 4 fig = pyplot.figure(figsize=(10, nrows * 3)) for i in indexes: image, mask = dataset[i][0].convert("RGB"), dataset[i][1] image128, mask128 = resizer(image), resizer(mask) imaget = image_with_segmask(image128, mask128) pyplot.subplot(nrows, 4, i + 1) pyplot.imshow(t2img(imaget)) pyplot.axis("off") # end for # end def # Plot 12 images with their segmentation masks. grid_image_with_segmask(pets_train_orig, indexes=list(range(12))) # # Albumentations # In this section, we'll see how to use Albumentations for data augmentation on images. Albumentations is able to perform simultaneous augmentations on both the images as well as the segmentation masks, making it much easier to perform more complex spatial transforms on the image and segmentation masks simultaneously. This capability extends to bounding boxes as well as keypoints; a short hedged bounding-box sketch appears at the end of this notebook.
# See the full reference for more details: https://albumentations.ai/docs/api_reference/full_reference/ import albumentations as A import cv2 def show_pair(image, mask): """Display a pair of images beside each other. Typically this is the image along with its segmentation mask. """ fig = pyplot.figure(figsize=(7, 3)) pyplot.subplot(1, 2, 1) pyplot.imshow(image) pyplot.axis("off") pyplot.subplot(1, 2, 2) pyplot.imshow(mask) pyplot.axis("off") image, mask = pets_train_orig[2][0], pets_train_orig[2][1] pil_image = image.convert("RGB") # OpenCV uses BGR images, but Albumentations expects RGB images, so if we start # with a PIL image, then we just convert it into a NumPy array, which is RGB # ordered opencv_image = np.array(pil_image) opencv_mask = np.array(mask) show_pair(opencv_image, opencv_mask) pyplot.show() flipper = A.HorizontalFlip(p=1.0) flipped = flipper(image=opencv_image, mask=opencv_mask) # Plot the image and the mask alongside each other. show_pair(flipped["image"], flipped["mask"]) pyplot.show() tx = A.Compose( [ A.CenterCrop(350, 350), A.Resize(128, 128), ] ) txed = tx(image=opencv_image, mask=opencv_mask) show_pair(txed["image"], txed["mask"]) pyplot.show() # ## Apply multiple spatial transforms using Albumentations # And display the results. def display_all_transformed(image, mask, transforms, labels, resizer): # Display a maximum of 4 images per row. nrows = (len(transforms) + 1 + 3) // 4 # 3 units height per row. fig = pyplot.figure(figsize=(10, 3 * nrows)) # The first time an image is displayed, don't transform it. transforms = [lambda **kwargs: kwargs] + transforms labels = ["Original"] + labels for i in range(len(transforms)): txed = transforms[i](image=image, mask=mask) # Convert to PIL Image. pil_image = Image.fromarray(txed["image"]) pil_mask = Image.fromarray(txed["mask"]) args = {} if pil_image.mode == "L": args["cmap"] = "gray" fig.add_subplot(nrows, 4, i + 1) pyplot.title(labels[i]) # Convert from CHW to HWC by permuting dimensions. image_with_mask = ( image_with_segmask(pil_image, pil_mask).permute(1, 2, 0).numpy() ) image_with_mask = resizer(image=image_with_mask)["image"] pyplot.imshow(Image.fromarray(image_with_mask), **args) pyplot.axis("off") # end for # end def transforms = [ A.CoarseDropout(p=1.0, max_height=32, max_width=32), A.CenterCrop(350, 350), A.HorizontalFlip(p=1.0), A.ChannelDropout(channel_drop_range=(1, 2), p=1.0), A.SafeRotate(p=1.0), A.Affine(shear=30.0, p=1.0), A.PiecewiseAffine(p=1.0), ] final_resize = A.Resize(128, 128) labels = "Coarse Dropout,Center Crop,Horizontal Flip,Channel Dropout,Safe Rotate,Shear,Piecewise Affine".split( "," ) display_all_transformed(opencv_image, opencv_mask, transforms, labels, final_resize)
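# ## Bounding boxes with Albumentations (hedged sketch) # As noted above, Albumentations can also transform bounding boxes together with the image via bbox_params; the box below is an illustrative pascal_voc-format value, not a real annotation.
bbox_tx = A.Compose(
    [A.HorizontalFlip(p=1.0), A.Resize(128, 128)],
    bbox_params=A.BboxParams(format="pascal_voc", label_fields=["class_labels"]),
)
example_boxes = [[30, 40, 300, 350]]  # [x_min, y_min, x_max, y_max], hypothetical
out = bbox_tx(image=opencv_image, bboxes=example_boxes, class_labels=["pet"])
print(out["bboxes"])  # boxes are flipped and rescaled consistently with the image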
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/839/129839935.ipynb
oxfordiiitpetfromxijiatao
cielceline
[{"Id": 129839935, "ScriptId": 38579855, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4930, "CreationDate": "05/16/2023 21:43:08", "VersionNumber": 3.0, "Title": "Starter for Oxford IIIT Pet using Torchvision", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 280.0, "LinesInsertedFromPrevious": 85.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 195.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186223454, "KernelVersionId": 129839935, "SourceDatasetVersionId": 1491706}]
[{"Id": 1491706, "DatasetId": 875741, "DatasourceVersionId": 1525665, "CreatorUserId": 3593372, "LicenseName": "Unknown", "CreationDate": "09/16/2020 02:21:31", "VersionNumber": 1.0, "Title": "Oxford-IIIT-Pet-from-XijiaTao", "Slug": "oxfordiiitpetfromxijiatao", "Subtitle": "For Training of Image Classification and Object Localization", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 875741, "CreatorUserId": 3593372, "OwnerUserId": 3593372.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1491706.0, "CurrentDatasourceVersionId": 1525665.0, "ForumId": 891166, "Type": 2, "CreationDate": "09/16/2020 02:21:31", "LastActivityDate": "09/16/2020", "TotalViews": 1308, "TotalDownloads": 16, "TotalVotes": 3, "TotalKernels": 2}]
[{"Id": 3593372, "UserName": "cielceline", "DisplayName": "ciel", "RegisterDate": "08/18/2019", "PerformanceTier": 1}]
false
0
3,007
0
3,047
3,007
129839463
<jupyter_start><jupyter_text>MSCI raw counts anndata [open-problems-raw-counts](https://www.kaggle.com/datasets/ryanholbrook/open-problems-raw-counts) as anndata files Kaggle dataset identifier: msci-raw-counts-anndata <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Data loading from Bio import SeqIO import pandas as pd import mygene import scanpy as sc import anndata import h5py from sklearn.decomposition import PCA from tqdm import tqdm mg = mygene.MyGeneInfo() tax_table = pd.read_table( "/kaggle/input/cafa-5-protein-function-prediction/Train/train_taxonomy.tsv", sep="\t", index_col="EntryID", ) prot_names = [ record.id for record in SeqIO.parse( "/kaggle/input/cafa-5-protein-function-prediction/Train/train_sequences.fasta", "fasta", ) ] # # Read Human scRNA-Seq and normalize sc_hum_data = sc.read_h5ad( "/kaggle/input/msci-raw-counts-anndata/train_cite_inputs_raw.ann.h5" ) sc_hum_data.var_names = [x.split("_")[0] for x in sc_hum_data.var_names] sc.pp.normalize_total(sc_hum_data, inplace=True) sc.pp.log1p(sc_hum_data) sc.pp.normalize_total(sc_hum_data, inplace=True) # # Read Mouse scRNA-Seq sc_mus_data = sc.read_text( "/kaggle/input/single-cell-rna-seq-nestorova2016-mouse-hspc/nestorowa_corrected_log2_transformed_counts.txt" ) # # Select human and mouse proteins hum_prot = tax_table.loc[tax_table["taxonomyID"] == 9606].index.to_list() mus_prot = tax_table.loc[tax_table["taxonomyID"] == 10090].index.to_list() # # Query MyGene to get gene names hum_gene_prot_conv = mg.querymany( hum_prot, scopes="uniprot", fields="symbol,ensembl.gene", species="human", returnall=False, as_dataframe=True, ) mus_gene_prot_conv = mg.querymany( mus_prot, scopes="uniprot", fields="symbol,ensembl.gene", species="mouse", returnall=False, as_dataframe=True, ) hum_gene_prot_conv mus_gene_prot_conv # # Subset scRNA-Seq to CAFA5 genes hum_shared_genes = list( set(sc_hum_data.var_names).intersection( set(hum_gene_prot_conv["ensembl.gene"].unique()) ) ) cafa_hum_gene_features = sc_hum_data[:, hum_shared_genes] cafa_hum_gene_features mus_shared_genes = list( set(sc_mus_data.var_names).intersection(set(mus_gene_prot_conv["symbol"].unique())) ) cafa_mus_gene_features = sc_mus_data[:, mus_shared_genes] cafa_mus_gene_features # # PCA of scRNA-Seq to get new features hum_gene_pca = PCA(n_components=100).fit_transform(cafa_hum_gene_features.X.T.todense()) hum_gene_pca = pd.DataFrame( hum_gene_pca.T, columns=cafa_hum_gene_features.var_names.to_list() ) new_feature_hum = [] hum_proteins = [] for prot in tqdm(hum_prot): try: genes = hum_gene_prot_conv.loc[prot].dropna()["ensembl.gene"] if isinstance(genes, str): genes = [genes] for gene in genes: if gene in hum_gene_pca.columns.to_list(): hum_proteins.append(prot) new_feature_hum.append(hum_gene_pca[gene].to_list()) except KeyError: continue new_features_hum = pd.DataFrame(new_feature_hum).T new_features_hum.columns = hum_proteins
new_features_hum.to_csv("/kaggle/working/new_features_hum_genes.csv") new_features_hum mus_gene_pca = PCA(n_components=100).fit_transform(cafa_mus_gene_features.X.T) mus_gene_pca = pd.DataFrame( mus_gene_pca.T, columns=cafa_mus_gene_features.var_names.to_list() ) new_feature_mus = [] mus_proteins = [] for prot in mus_prot: try: genes = mus_gene_prot_conv.loc[prot].dropna()["symbol"] if isinstance(genes, str): genes = [genes] for gene in genes: if gene in mus_gene_pca.columns.to_list(): mus_proteins.append(prot) new_feature_mus.append(mus_gene_pca[gene].to_list()) except KeyError: continue new_features_mus = pd.DataFrame(new_feature_mus).T new_features_mus.columns = mus_proteins new_features_mus.to_csv("/kaggle/working/new_features_mus_genes.csv") new_features_mus new_feature = pd.concat([new_features_hum.T, new_features_mus.T]) new_feature # # Ridge regression # Loading train labels n_labels_to_consider = 1499 trainTerms = pd.read_csv( "/kaggle/input/cafa-5-protein-function-prediction/Train/train_terms.tsv", sep="\t" ) print(trainTerms.shape) display(trainTerms.head(2)) vec_freqCount = trainTerms["term"].value_counts() print(vec_freqCount) print() labels_to_consider = list(vec_freqCount.index[:n_labels_to_consider]) print( "n_labels_to_consider:", len(labels_to_consider), "First 10:", labels_to_consider[:10], ) # Load protein IDs fn = "/kaggle/input/t5embeds/train_ids.npy" vec_train_protein_ids = np.load(fn) print(vec_train_protein_ids.shape) vec_train_protein_ids # Prepare Y train_size = 142246 Y = np.zeros((train_size, n_labels_to_consider)) print(Y.shape) series_train_protein_ids = pd.Series(vec_train_protein_ids) # trainTerms_smaller = trainTerms[ trainTerms["term"].isin(labels_to_consider) ] # to speed-up the next step print(trainTerms_smaller.shape) for i in tqdm(range(Y.shape[1])): m = trainTerms_smaller["term"] == labels_to_consider[i] # m.sum() Y[:, i] = series_train_protein_ids.isin( set(trainTerms_smaller[m]["EntryID"]) ).astype(float) Y # Loading precalculated embeddings # fn = '/kaggle/input/protein-embeddings-1/reduced_embeddings_file.npy' # fn = '/kaggle/input/protein-embeddings-1/embed_protbert_train_clip_1200_first_70000_prot.csv' fn = "/kaggle/input/t5embeds/train_embeds.npy" # fn = '/kaggle/input/t5embeds/test_embeds.npy' print(fn) if ".csv" in fn: df = pd.read_csv(fn, index_col=0) X = df.values elif ".npy" in fn: X = np.load(fn) print(X.shape) X # Load protein IDs fn = "/kaggle/input/t5embeds/train_ids.npy" vec_train_protein_ids = np.load(fn) print(vec_train_protein_ids.shape) vec_train_protein_ids X_ext = [] for ind, prot_id in tqdm(enumerate(vec_train_protein_ids)): if prot_id in new_feature.index: X_ext.append(np.r_[X[ind, :], new_feature.loc[prot_id]]) else: X_ext.append(np.r_[X[ind, :], [np.mean(X[ind, :])] * 100]) X = np.array(X_ext) from sklearn.model_selection import train_test_split IX = np.arange(len(X)) IX_train, IX_test, _, _ = train_test_split(IX, IX, train_size=0.1, random_state=42) print(len(IX_train), len(IX_test), IX_train[:10], IX_test[:10]) # Modeling from sklearn.linear_model import Ridge model = Ridge(alpha=1.0) str_model_id = "Ridge1" df_models_stat = pd.DataFrame() model import time from sklearn.metrics import roc_auc_score t0 = time.time() model.fit(X[IX_train, :], Y[IX_train, :]) Y_pred_test = model.predict(X[IX_test, :]) tt = time.time() - t0 print(str_model_id, tt) l = [] for i in range(Y.shape[1]): if len(np.unique(Y[IX_test, i])) > 1: s = roc_auc_score(Y[IX_test, i], Y_pred_test[:, i]) else: s = 0.5 l.append(s) if i % 10 == 0: print(i, s)
df_models_stat.loc[str_model_id, "RocAuc Mean Test"] = np.mean(l) df_models_stat.loc[str_model_id, "Time"] = np.round(tt, 1) df_models_stat.loc[str_model_id, "Test Size"] = len(IX_test) df_models_stat # Scores statistics across targets import matplotlib.pyplot as plt plt.hist(l) plt.show() pd.Series(l).describe() # Full dataset model model.fit(X, Y)
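# A hedged continuation sketch (not in the original excerpt): applying the refitted full model to the test embeddings whose path appears commented out above. The companion file name `test_ids.npy` is an assumption, and the test proteins need the same 100-column expression extension as the training set.
X_test_emb = np.load("/kaggle/input/t5embeds/test_embeds.npy")
test_ids = np.load("/kaggle/input/t5embeds/test_ids.npy")  # assumed companion file
X_test_ext = []
for ind, prot_id in enumerate(test_ids):
    if prot_id in new_feature.index:
        X_test_ext.append(np.r_[X_test_emb[ind, :], new_feature.loc[prot_id]])
    else:
        X_test_ext.append(np.r_[X_test_emb[ind, :], [np.mean(X_test_emb[ind, :])] * 100])
Y_test_pred = model.predict(np.array(X_test_ext))  # one score per considered GO term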
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/839/129839463.ipynb
msci-raw-counts-anndata
kseniyapetrova
[{"Id": 129839463, "ScriptId": 38595212, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4988408, "CreationDate": "05/16/2023 21:36:09", "VersionNumber": 1.0, "Title": "RidgeReg with expression features", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 261.0, "LinesInsertedFromPrevious": 241.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 20.0, "LinesInsertedFromFork": 241.0, "LinesDeletedFromFork": 199.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 20.0, "TotalVotes": 5}]
[{"Id": 186222804, "KernelVersionId": 129839463, "SourceDatasetVersionId": 4496555}, {"Id": 186222805, "KernelVersionId": 129839463, "SourceDatasetVersionId": 5499219}, {"Id": 186222803, "KernelVersionId": 129839463, "SourceDatasetVersionId": 2934799}]
[{"Id": 4496555, "DatasetId": 2630050, "DatasourceVersionId": 4556915, "CreatorUserId": 11373715, "LicenseName": "Unknown", "CreationDate": "11/12/2022 16:42:17", "VersionNumber": 1.0, "Title": "MSCI raw counts anndata", "Slug": "msci-raw-counts-anndata", "Subtitle": NaN, "Description": "[open-problems-raw-counts](https://www.kaggle.com/datasets/ryanholbrook/open-problems-raw-counts) as anndata files", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2630050, "CreatorUserId": 11373715, "OwnerUserId": 11373715.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4496555.0, "CurrentDatasourceVersionId": 4556915.0, "ForumId": 2661202, "Type": 2, "CreationDate": "11/12/2022 16:42:17", "LastActivityDate": "11/12/2022", "TotalViews": 154, "TotalDownloads": 13, "TotalVotes": 1, "TotalKernels": 10}]
[{"Id": 11373715, "UserName": "kseniyapetrova", "DisplayName": "praefrontalis", "RegisterDate": "08/23/2022", "PerformanceTier": 1}]
false
0
2,741
5
2,809
2,741
129839627
<jupyter_start><jupyter_text>Diabetes Dataset ### Context This dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective is to predict based on diagnostic measurements whether a patient has diabetes. ### Content Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage. - Pregnancies: Number of times pregnant - Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test - BloodPressure: Diastolic blood pressure (mm Hg) - SkinThickness: Triceps skin fold thickness (mm) - Insulin: 2-Hour serum insulin (mu U/ml) - BMI: Body mass index (weight in kg/(height in m)^2) - DiabetesPedigreeFunction: Diabetes pedigree function - Age: Age (years) - Outcome: Class variable (0 or 1) #### Sources: (a) Original owners: National Institute of Diabetes and Digestive and Kidney Diseases (b) Donor of database: Vincent Sigillito ([email protected]) Research Center, RMI Group Leader Applied Physics Laboratory The Johns Hopkins University Johns Hopkins Road Laurel, MD 20707 (301) 953-6231 (c) Date received: 9 May 1990 #### Past Usage: 1. Smith, J. W., Everhart, J. E., Dickson, W. C., Knowler, W. C., & Johannes, R. S. (1988). Using the ADAP learning algorithm to forecast the onset of diabetes mellitus. In Proceedings of the Symposium on Computer Applications and Medical Care (pp. 261-265). IEEE Computer Society Press. The diagnostic, binary-valued variable investigated is whether the patient shows signs of diabetes according to World Health Organization criteria (i.e., if the 2 hour post-load plasma glucose was at least 200 mg/dl at any survey examination or if found during routine medical care). The population lives near Phoenix, Arizona, USA. Results: Their ADAP algorithm makes a real-valued prediction between 0 and 1. This was transformed into a binary decision using a cutoff of 0.448. Using 576 training instances, the sensitivity and specificity of their algorithm was 76% on the remaining 192 instances. #### Relevant Information: Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage. ADAP is an adaptive learning routine that generates and executes digital analogs of perceptron-like devices. It is a unique algorithm; see the paper for details. #### Number of Instances: 768 #### Number of Attributes: 8 plus class #### For Each Attribute: (all numeric-valued) 1. Number of times pregnant 2. Plasma glucose concentration at 2 hours in an oral glucose tolerance test 3. Diastolic blood pressure (mm Hg) 4. Triceps skin fold thickness (mm) 5. 2-Hour serum insulin (mu U/ml) 6. Body mass index (weight in kg/(height in m)^2) 7. Diabetes pedigree function 8. Age (years) 9.
Class variable (0 or 1) #### Missing Attribute Values: Yes #### Class Distribution: (class value 1 is interpreted as "tested positive for diabetes") Kaggle dataset identifier: diabetes-data-set <jupyter_code>import pandas as pd df = pd.read_csv('diabetes-data-set/diabetes.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 768 entries, 0 to 767 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pregnancies 768 non-null int64 1 Glucose 768 non-null int64 2 BloodPressure 768 non-null int64 3 SkinThickness 768 non-null int64 4 Insulin 768 non-null int64 5 BMI 768 non-null float64 6 DiabetesPedigreeFunction 768 non-null float64 7 Age 768 non-null int64 8 Outcome 768 non-null int64 dtypes: float64(2), int64(7) memory usage: 54.1 KB <jupyter_text>Examples: { "Pregnancies": 6.0, "Glucose": 148.0, "BloodPressure": 72.0, "SkinThickness": 35.0, "Insulin": 0.0, "BMI": 33.6, "DiabetesPedigreeFunction": 0.627, "Age": 50.0, "Outcome": 1.0 } { "Pregnancies": 1.0, "Glucose": 85.0, "BloodPressure": 66.0, "SkinThickness": 29.0, "Insulin": 0.0, "BMI": 26.6, "DiabetesPedigreeFunction": 0.351, "Age": 31.0, "Outcome": 0.0 } { "Pregnancies": 8.0, "Glucose": 183.0, "BloodPressure": 64.0, "SkinThickness": 0.0, "Insulin": 0.0, "BMI": 23.3, "DiabetesPedigreeFunction": 0.672, "Age": 32.0, "Outcome": 1.0 } { "Pregnancies": 1.0, "Glucose": 89.0, "BloodPressure": 66.0, "SkinThickness": 23.0, "Insulin": 94.0, "BMI": 28.1, "DiabetesPedigreeFunction": 0.167, "Age": 21.0, "Outcome": 0.0 } <jupyter_script># # Predict Diabetes with Machine Learning # Import Packages import pandas as pd # Used to work with datasets import numpy as np # Used to work with arrays # Visualization import matplotlib.pyplot as plt import seaborn as sns import sklearn from sklearn.neighbors import ( KNeighborsClassifier, ) # Classifier implementing the k-nearest neighbors vote from sklearn.tree import ( DecisionTreeClassifier, ) ## A class capable of performing multi-class classification on a dataset. from sklearn.svm import SVC from sklearn.neural_network import ( MLPClassifier, ) # Trains iteratively; at each step the partial derivatives of the loss function with respect to the model parameters are computed. from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.naive_bayes import GaussianNB from sklearn.mixture import BayesianGaussianMixture from sklearn.linear_model import SGDClassifier from sklearn.ensemble import RandomForestClassifier import lightgbm as ltb from sklearn.preprocessing import ( StandardScaler, ) ## Removes the mean and scales each feature/variable to unit variance;
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    roc_auc_score,
    confusion_matrix,
    classification_report,
)

import warnings

warnings.filterwarnings("ignore")

# Data
# Pregnancies: Number of times pregnant
# Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test
# BloodPressure: Diastolic blood pressure (mm Hg)
# SkinThickness: Triceps skin fold thickness (mm)
# Insulin: 2-Hour serum insulin (mu U/ml)
# BMI: Body mass index (weight in kg/(height in m)^2)
# DiabetesPedigreeFunction: Diabetes pedigree function
# Age: Age (years)
# Outcome: Class variable (0 or 1)

# read the data
diabetes = pd.read_csv("/kaggle/input/diabetes-data-set/diabetes.csv")

# column names
print(diabetes.columns)

diabetes.head()  # show the first rows of the data

# shape of the data
print("dimension of data: {}".format(diabetes.shape))
# The diabetes dataset consists of 768 data points, with 9 features each.

# print general information
diabetes.info()

# check for null values
diabetes.isnull().sum()

# print descriptive statistics
diabetes.describe().T

# "Outcome" is the target feature: 0 means no diabetes, 1 means diabetes is present
print(diabetes.groupby("Outcome").size())
# 500 samples are labelled 0 and 268 are labelled 1.

# split the frame by class: Outcome == 0 (healthy) and Outcome == 1 (diabetic)
diabetes_0 = diabetes[diabetes["Outcome"] == 0]
diabetes_1 = diabetes[diabetes["Outcome"] == 1]

# number of observations in each class, shown as bars
sns.countplot(data=diabetes, x="Outcome", label="Count")

# #### Age
# histogram of "Age" in the Outcome == 0 subset
plt.hist(diabetes_0["Age"])
plt.xlabel("Age")
plt.ylabel("Count")
plt.show()

# histogram of "Age" in the Outcome == 1 subset
plt.hist(diabetes_1["Age"])
plt.xlabel("Age")
plt.ylabel("Count")
plt.show()

# histogram of "Age" split by class
sns.histplot(data=diabetes, x="Age", hue="Outcome")
plt.xlabel("Age")
plt.ylabel("Count")
plt.axhline(np.mean(diabetes["Age"]), color="red", linestyle="solid")
plt.show()

# violin plot of Age
plt.violinplot(diabetes["Age"])

# box plot of Age
sns.boxplot(diabetes["Age"])

diabetes_0["Age"].mean()

diabetes_1["Age"].mean()

# ###### The incidence of diabetes rises from around age 38

# #### SkinThickness
# histogram of "SkinThickness" split by class
sns.histplot(data=diabetes, x="SkinThickness", hue="Outcome")
plt.xlabel("SkinThickness")
plt.ylabel("Count")
plt.show()

# violin plot of SkinThickness
plt.violinplot(diabetes["SkinThickness"])

# average SkinThickness of healthy people
diabetes_0["SkinThickness"].mean()
# maximum SkinThickness of healthy people
diabetes_0["SkinThickness"].max()
# average SkinThickness of diabetics
diabetes_1["SkinThickness"].mean()
# maximum SkinThickness of diabetics
diabetes_1["SkinThickness"].max()

# ###### The skin of diabetics is on average thicker than that of healthy people

# #### BMI
# histogram of "BMI" split by class
sns.histplot(data=diabetes, x="BMI", hue="Outcome")
plt.xlabel("BMI")
plt.ylabel("Count")
plt.show()

# average BMI of healthy people
diabetes_0["BMI"].mean()
# maximum BMI of healthy people
diabetes_0["BMI"].max()
# average BMI of diabetics
diabetes_1["BMI"].mean()
# maximum BMI of diabetics
diabetes_1["BMI"].max()

# ###### Mean BMI is higher in diabetics than in healthy people
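# The per-group means and maxima above can be cross-checked in one step. A
# minimal sketch (not in the original notebook) using pandas groupby to
# summarise several features by Outcome at once:
diabetes.groupby("Outcome")[["Age", "SkinThickness", "BMI"]].agg(["mean", "max"])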
## histogram of "Pregnancies" split by class
sns.histplot(data=diabetes, x="Pregnancies", hue="Outcome")
plt.xlabel("Pregnancies")
plt.ylabel("Count")
plt.xticks(range(0, 15, 2))
plt.show()

sns.countplot(data=diabetes, x="Pregnancies", hue="Outcome")

# average number of pregnancies of healthy people
diabetes_0["Pregnancies"].mean()
# maximum number of pregnancies of healthy people
diabetes_0["Pregnancies"].max()
# average number of pregnancies of diabetics
diabetes_1["Pregnancies"].mean()
# maximum number of pregnancies of diabetics
diabetes_1["Pregnancies"].max()

# ###### The higher the number of pregnancies, the higher the incidence of diabetes

# #### Age vs BMI
# scatter plot of the relationship between Age and BMI
plt.scatter(diabetes["BMI"], diabetes["Age"])
plt.title("The relationship between Age and BMI")
plt.xlabel("BMI")
plt.ylabel("Age")
plt.show()

# compare the absolute correlation between the target and the other features
correlations = diabetes.corr()["Outcome"].drop("Outcome")
sorted_correlations = correlations.abs().sort_values(ascending=False)
sorted_correlations

# bar chart of the absolute correlations, ordered for easy comparison
sns.barplot(x=sorted_correlations.index, y=sorted_correlations)
plt.xticks(rotation=90)
plt.xlabel("Features")
plt.ylabel("Absolute Correlation")
plt.show()

# ###### We will train the models on the most important features

# preview the six strongest correlations
diabetes_corr = sorted_correlations[:6]
diabetes_corr

# keep the five most correlated features plus the target
diabetes_corr = diabetes[["Glucose", "BMI", "Age", "Pregnancies", "DiabetesPedigreeFunction", "Outcome"]]
diabetes_corr

# ###### Drop outlier (noise) data
# Calculate the interquartile range (IQR) for each column
Q1 = diabetes_corr.quantile(0.25)
Q3 = diabetes_corr.quantile(0.75)
IQR = Q3 - Q1

# Identify the outliers
outliers = diabetes_corr[
    ((diabetes_corr < (Q1 - 1.5 * IQR)) | (diabetes_corr > (Q3 + 1.5 * IQR))).any(axis=1)
]

# drop the outliers from the data
train_clean = diabetes_corr.drop(outliers.index)
train_clean

# visualize the correlation between the remaining variables
plt.figure(figsize=(15, 15))
sns.heatmap(np.abs(train_clean.corr()), annot=True)
plt.title("Correlation data", fontsize=12)

# split the data
X = train_clean.drop(columns=["Outcome"])  # features
y = train_clean["Outcome"]  # target

# StandardScaler applied to the feature frame
# (note: the cross-validation below uses the unscaled X; the scaled frame x is only displayed)
Stand = StandardScaler()
x = pd.DataFrame(Stand.fit_transform(X), columns=X.columns)
x

# create the list of models
models = []
models.append(("KNN", KNeighborsClassifier()))
models.append(("SVM", SVC()))
models.append(("DecisionTreeClassifier", DecisionTreeClassifier()))
models.append(("LogisticRegression", LogisticRegression()))
models.append(("LinearDiscriminantAnalysis", LinearDiscriminantAnalysis()))
models.append(("GaussianNB", GaussianNB()))
models.append(("MLPClassifier", MLPClassifier()))
models.append(("BayesianGaussianMixture", BayesianGaussianMixture()))
models.append(("SGDClassifier", SGDClassifier()))
models.append(("RandomForestClassifier", RandomForestClassifier()))

# calculate the cross-validated accuracy of every model
scoring_ = []
name_m = []
for i in range(len(models)):
    kfold = KFold(n_splits=10, shuffle=True, random_state=43)
    scor = cross_val_score(models[i][1], X, y, cv=kfold, scoring="accuracy")
    scoring_.append(scor)
    name_m.append(models[i][0])
    # report the mean over this model's folds
    print("{} : score {:.3f}".format(models[i][0], np.mean(scor)))
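# Optional cross-check (not in the original notebook): collect the fold scores
# into a DataFrame so each model's mean accuracy and spread can be compared
# side by side.
cv_summary = pd.DataFrame({name: scores for name, scores in zip(name_m, scoring_)})
print(cv_summary.agg(["mean", "std"]).T.sort_values("mean", ascending=False))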
# visualization: box plot for the algorithm accuracy comparison
fig = plt.figure(figsize=(10, 6))
fig.suptitle("Algorithm Accuracy Comparison")
ax = fig.add_subplot(111)
plt.boxplot(scoring_)
plt.axhline(0.75, color="pink", linestyle="solid")
plt.axhline(0.80, color="green", linestyle="solid")
plt.axhline(0.85, color="red", linestyle="solid")
ax.set_xticklabels(name_m, rotation=70)
plt.show()


def confusion_matrix_sh(confusion_matrix, class_labels):
    """
    Display a confusion matrix using matplotlib.
    (Note: the parameter name shadows sklearn's confusion_matrix function
    inside this function's scope.)
    """
    # Normalize the confusion matrix
    confusion_matrix_normalized = (
        confusion_matrix.astype("float") / confusion_matrix.sum(axis=1)[:, np.newaxis]
    )

    # Set up the figure
    plt.figure(figsize=(8, 6))
    plt.imshow(confusion_matrix_normalized, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(class_labels))
    plt.xticks(tick_marks, class_labels, rotation=45)
    plt.yticks(tick_marks, class_labels)

    # Add a count label to each cell
    thresh = confusion_matrix_normalized.max() / 2.0
    for i, j in np.ndindex(confusion_matrix.shape):
        plt.text(
            j,
            i,
            format(confusion_matrix[i, j], "d"),
            horizontalalignment="center",
            color="white" if confusion_matrix_normalized[i, j] > thresh else "black",
        )

    # Set axis labels
    plt.xlabel("Predicted Class")
    plt.ylabel("True Class")

    # Show the plot
    plt.tight_layout()
    plt.show()


# evaluation function
def evaluate(model, X, target):
    """
    Evaluate the performance of a model.
    Inputs: model, data, target.
    Outputs: accuracy, precision, recall, F1 score, AUC-ROC and confusion matrix.
    """
    # split the data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, target, test_size=0.25, random_state=543
    )
    model.fit(X_train, y_train)  # fit the model
    y_pred = model.predict(X_test)

    print("model: ", model)

    # Accuracy
    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)

    # Precision
    precision = precision_score(y_test, y_pred)
    print("Precision:", precision)

    # Recall
    recall = recall_score(y_test, y_pred)
    print("Recall:", recall)

    # F1 Score
    f1 = f1_score(y_test, y_pred)
    print("F1 Score:", f1)

    # AUC-ROC
    auc_roc = roc_auc_score(y_test, y_pred)
    print("AUC-ROC:", auc_roc)

    # Confusion Matrix
    confusion = confusion_matrix(y_test, y_pred)
    print("Confusion Matrix:\n", confusion)

    report = classification_report(y_test, y_pred)
    print(report)
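# confusion_matrix_sh() is defined above but never invoked in the original
# notebook. A minimal usage sketch (assumed, not from the source): fit any
# classifier, build its confusion matrix, and hand it to the helper.
_X_tr, _X_te, _y_tr, _y_te = train_test_split(X, y, test_size=0.25, random_state=543)
_clf = LogisticRegression().fit(_X_tr, _y_tr)
confusion_matrix_sh(
    confusion_matrix(_y_te, _clf.predict(_X_te)),
    class_labels=["No diabetes", "Diabetes"],
)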
# ## K-Nearest Neighbors prediction
# k-nearest neighbors is arguably the simplest machine learning algorithm:
# building the model consists only of storing the training set. To make a
# prediction for a new point, the algorithm finds the closest data points
# in the training set.
# First, let's see whether we can confirm the relationship between model
# complexity and accuracy:

# split the data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=543)

training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)

for n_neighbors in neighbors_settings:
    # build the model
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train, y_train)
    # record training set accuracy
    training_accuracy.append(knn.score(X_train, y_train))
    # record test set accuracy
    test_accuracy.append(knn.score(X_test, y_test))

plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()

## check the accuracy of k-nearest neighbors
evaluate(KNeighborsClassifier(n_neighbors=2), X, y)

# ## Support vector machine
model = SVC()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)

param_grid = {
    "C": [0.1, 1, 10, 100, 1000, 10000],
    "gamma": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000],
}

# Create an instance of the model
model = SVC()

# Create an instance of GridSearchCV
grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=2)

# Fit the GridSearchCV
grid_search.fit(X_train, y_train)

# Get the best parameters
best_params = grid_search.best_params_
best_accuracy = grid_search.best_score_

# Evaluate the model with the best parameters
best_model = grid_search.best_estimator_
test_accuracy = best_model.score(X_test, y_test)

# Print the results
print("Best Parameters: ", best_params)
print("Best Accuracy: ", best_accuracy)
print("Test Accuracy: ", test_accuracy)

## check the accuracy of the tuned SVC
evaluate(grid_search, X, y)

# ## Decision tree classifier
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
print(
    "Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train))
)  # accuracy on the training data
print(
    "Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test))
)  # accuracy on the test data

# The accuracy on the training set with an unpruned decision tree is 100%,
# while the accuracy on the test set is much worse. This indicates that the
# tree is overfitting and does not generalize well to new data. Therefore we
# need to apply pre-pruning to the tree.
# Now I will do it again with max_depth=3, which limits the depth of the tree.
# This lowers the accuracy on the training set but improves the test set.
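# To make the pre-pruning argument concrete, here is a small sketch (assumed,
# not in the original notebook) that scans max_depth values and reports
# train/test accuracy, mirroring the n_neighbors loop above:
for depth in range(1, 8):
    t = DecisionTreeClassifier(max_depth=depth, random_state=0).fit(X_train, y_train)
    print(
        f"max_depth={depth}: train={t.score(X_train, y_train):.3f}, "
        f"test={t.score(X_test, y_test):.3f}"
    )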
## We check the accuracy of the pruned decision tree classifier for predicting diabetes
# (ccp_alpha=2 is an aggressive cost-complexity pruning value, so together with
# max_depth=3 this yields a very heavily pruned tree)
model_tree = DecisionTreeClassifier(
    criterion="entropy", max_depth=3, ccp_alpha=2, min_samples_split=5
)
evaluate(model_tree, X, y)

# ## Logistic regression for predicting diabetes
## We check the accuracy of logistic regression
evaluate(LogisticRegression(), X, y)

# ### LinearDiscriminantAnalysis
evaluate(LinearDiscriminantAnalysis(), X, y)

# ### GaussianNB
evaluate(GaussianNB(), X, y)

# ## MLPClassifier for predicting diabetes
mlp = MLPClassifier(max_iter=100, alpha=0.001, random_state=0)
mlp.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(mlp.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(mlp.score(X_test, y_test)))

## We check the accuracy of the MLP classifier for predicting diabetes
evaluate(MLPClassifier(max_iter=100, alpha=1), X, y)

# ### SGDClassifier
evaluate(SGDClassifier(), X, y)

# ### RandomForestClassifier
evaluate(RandomForestClassifier(), X, y)
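# Closing sketch (assumed, not part of the original notebook): once a model is
# chosen, it can score a single hypothetical patient. The feature values below
# are made up for illustration and must follow the training column order:
# Glucose, BMI, Age, Pregnancies, DiabetesPedigreeFunction.
final_model = RandomForestClassifier(random_state=0).fit(X, y)
new_patient = pd.DataFrame([[140, 32.0, 45, 2, 0.5]], columns=X.columns)
print("Predicted Outcome:", final_model.predict(new_patient)[0])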
[{"diabetes-data-set/diabetes.csv": {"column_names": "[\"Pregnancies\", \"Glucose\", \"BloodPressure\", \"SkinThickness\", \"Insulin\", \"BMI\", \"DiabetesPedigreeFunction\", \"Age\", \"Outcome\"]", "column_data_types": "{\"Pregnancies\": \"int64\", \"Glucose\": \"int64\", \"BloodPressure\": \"int64\", \"SkinThickness\": \"int64\", \"Insulin\": \"int64\", \"BMI\": \"float64\", \"DiabetesPedigreeFunction\": \"float64\", \"Age\": \"int64\", \"Outcome\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 768 entries, 0 to 767\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Pregnancies 768 non-null int64 \n 1 Glucose 768 non-null int64 \n 2 BloodPressure 768 non-null int64 \n 3 SkinThickness 768 non-null int64 \n 4 Insulin 768 non-null int64 \n 5 BMI 768 non-null float64\n 6 DiabetesPedigreeFunction 768 non-null float64\n 7 Age 768 non-null int64 \n 8 Outcome 768 non-null int64 \ndtypes: float64(2), int64(7)\nmemory usage: 54.1 KB\n", "summary": "{\"Pregnancies\": {\"count\": 768.0, \"mean\": 3.8450520833333335, \"std\": 3.3695780626988694, \"min\": 0.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 6.0, \"max\": 17.0}, \"Glucose\": {\"count\": 768.0, \"mean\": 120.89453125, \"std\": 31.97261819513622, \"min\": 0.0, \"25%\": 99.0, \"50%\": 117.0, \"75%\": 140.25, \"max\": 199.0}, \"BloodPressure\": {\"count\": 768.0, \"mean\": 69.10546875, \"std\": 19.355807170644777, \"min\": 0.0, \"25%\": 62.0, \"50%\": 72.0, \"75%\": 80.0, \"max\": 122.0}, \"SkinThickness\": {\"count\": 768.0, \"mean\": 20.536458333333332, \"std\": 15.952217567727637, \"min\": 0.0, \"25%\": 0.0, \"50%\": 23.0, \"75%\": 32.0, \"max\": 99.0}, \"Insulin\": {\"count\": 768.0, \"mean\": 79.79947916666667, \"std\": 115.24400235133817, \"min\": 0.0, \"25%\": 0.0, \"50%\": 30.5, \"75%\": 127.25, \"max\": 846.0}, \"BMI\": {\"count\": 768.0, \"mean\": 31.992578124999998, \"std\": 7.884160320375446, \"min\": 0.0, \"25%\": 27.3, \"50%\": 32.0, \"75%\": 36.6, \"max\": 67.1}, \"DiabetesPedigreeFunction\": {\"count\": 768.0, \"mean\": 0.47187630208333325, \"std\": 0.3313285950127749, \"min\": 0.078, \"25%\": 0.24375, \"50%\": 0.3725, \"75%\": 0.62625, \"max\": 2.42}, \"Age\": {\"count\": 768.0, \"mean\": 33.240885416666664, \"std\": 11.760231540678685, \"min\": 21.0, \"25%\": 24.0, \"50%\": 29.0, \"75%\": 41.0, \"max\": 81.0}, \"Outcome\": {\"count\": 768.0, \"mean\": 0.3489583333333333, \"std\": 0.47695137724279896, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"Pregnancies\":{\"0\":6,\"1\":1,\"2\":8,\"3\":1},\"Glucose\":{\"0\":148,\"1\":85,\"2\":183,\"3\":89},\"BloodPressure\":{\"0\":72,\"1\":66,\"2\":64,\"3\":66},\"SkinThickness\":{\"0\":35,\"1\":29,\"2\":0,\"3\":23},\"Insulin\":{\"0\":0,\"1\":0,\"2\":0,\"3\":94},\"BMI\":{\"0\":33.6,\"1\":26.6,\"2\":23.3,\"3\":28.1},\"DiabetesPedigreeFunction\":{\"0\":0.627,\"1\":0.351,\"2\":0.672,\"3\":0.167},\"Age\":{\"0\":50,\"1\":31,\"2\":32,\"3\":21},\"Outcome\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0}}"}}]
true
1
<start_data_description><data_path>diabetes-data-set/diabetes.csv: <column_names> ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome'] <column_types> {'Pregnancies': 'int64', 'Glucose': 'int64', 'BloodPressure': 'int64', 'SkinThickness': 'int64', 'Insulin': 'int64', 'BMI': 'float64', 'DiabetesPedigreeFunction': 'float64', 'Age': 'int64', 'Outcome': 'int64'} <dataframe_Summary> {'Pregnancies': {'count': 768.0, 'mean': 3.8450520833333335, 'std': 3.3695780626988694, 'min': 0.0, '25%': 1.0, '50%': 3.0, '75%': 6.0, 'max': 17.0}, 'Glucose': {'count': 768.0, 'mean': 120.89453125, 'std': 31.97261819513622, 'min': 0.0, '25%': 99.0, '50%': 117.0, '75%': 140.25, 'max': 199.0}, 'BloodPressure': {'count': 768.0, 'mean': 69.10546875, 'std': 19.355807170644777, 'min': 0.0, '25%': 62.0, '50%': 72.0, '75%': 80.0, 'max': 122.0}, 'SkinThickness': {'count': 768.0, 'mean': 20.536458333333332, 'std': 15.952217567727637, 'min': 0.0, '25%': 0.0, '50%': 23.0, '75%': 32.0, 'max': 99.0}, 'Insulin': {'count': 768.0, 'mean': 79.79947916666667, 'std': 115.24400235133817, 'min': 0.0, '25%': 0.0, '50%': 30.5, '75%': 127.25, 'max': 846.0}, 'BMI': {'count': 768.0, 'mean': 31.992578124999998, 'std': 7.884160320375446, 'min': 0.0, '25%': 27.3, '50%': 32.0, '75%': 36.6, 'max': 67.1}, 'DiabetesPedigreeFunction': {'count': 768.0, 'mean': 0.47187630208333325, 'std': 0.3313285950127749, 'min': 0.078, '25%': 0.24375, '50%': 0.3725, '75%': 0.62625, 'max': 2.42}, 'Age': {'count': 768.0, 'mean': 33.240885416666664, 'std': 11.760231540678685, 'min': 21.0, '25%': 24.0, '50%': 29.0, '75%': 41.0, 'max': 81.0}, 'Outcome': {'count': 768.0, 'mean': 0.3489583333333333, 'std': 0.47695137724279896, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 768 entries, 0 to 767 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Pregnancies 768 non-null int64 1 Glucose 768 non-null int64 2 BloodPressure 768 non-null int64 3 SkinThickness 768 non-null int64 4 Insulin 768 non-null int64 5 BMI 768 non-null float64 6 DiabetesPedigreeFunction 768 non-null float64 7 Age 768 non-null int64 8 Outcome 768 non-null int64 dtypes: float64(2), int64(7) memory usage: 54.1 KB <some_examples> {'Pregnancies': {'0': 6, '1': 1, '2': 8, '3': 1}, 'Glucose': {'0': 148, '1': 85, '2': 183, '3': 89}, 'BloodPressure': {'0': 72, '1': 66, '2': 64, '3': 66}, 'SkinThickness': {'0': 35, '1': 29, '2': 0, '3': 23}, 'Insulin': {'0': 0, '1': 0, '2': 0, '3': 94}, 'BMI': {'0': 33.6, '1': 26.6, '2': 23.3, '3': 28.1}, 'DiabetesPedigreeFunction': {'0': 0.627, '1': 0.351, '2': 0.672, '3': 0.167}, 'Age': {'0': 50, '1': 31, '2': 32, '3': 21}, 'Outcome': {'0': 1, '1': 0, '2': 1, '3': 0}} <end_description>
4,761
2
6,467
4,761
129839208
<jupyter_start><jupyter_text>Brian Tumor Dataset
### Context
This dataset consists of scanned brain images of patients diagnosed with a brain tumour.
### Content
Separate files for train and test data, with features and labels kept apart.
Kaggle dataset identifier: brian-tumor-dataset
<jupyter_script># # Brain Tumour Classifier
# This image classifier was built as an experiment for lesson 2 of Fast.ai's ML course. I'm using it to learn the basics of their library on a meaningful dataset.

# ## Initialising the Dataloader
from fastai.vision.all import *


def is_tumour(x):
    # File names beginning with "Not" (e.g. "Not Cancer (1).jpg") are scans of
    # healthy brains, so they are labelled "Healthy"; all other files show a
    # tumour.
    if x.startswith("Not"):
        return "Healthy"
    else:
        return "Tumour"


path = "/kaggle/input/brian-tumor-dataset/Brain Tumor Data Set/Brain Tumor Data Set"

dataloaders = ImageDataLoaders.from_name_func(
    "./",
    get_image_files(path),
    valid_pct=0.3,
    seed=32,
    label_func=is_tumour,
    item_tfms=Resize(128),
)

learn = vision_learner(dataloaders, resnet18, metrics=error_rate)
# ShowGraphCallback is a training callback, so it is passed to the fit call
# rather than to the DataLoaders factory
learn.fine_tune(3, cbs=ShowGraphCallback())
learn.export("tumour_classifier_model.pkl")

interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(15)
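# A minimal sketch (not in the original notebook) of consuming the exported
# model. The image path below is hypothetical; note that the is_tumour label
# function must be importable wherever the model is loaded.
from fastai.vision.all import PILImage, load_learner

learn_inf = load_learner("tumour_classifier_model.pkl")
pred_class, pred_idx, probs = learn_inf.predict(PILImage.create("some_scan.jpg"))
print(pred_class, probs[pred_idx])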
<jupyter_start><jupyter_text>Song Features Dataset - Regressing Popularity
**Introduction**
Spotify for Developers offers a wide range of possibilities to utilize the extensive catalog of Spotify data. One of them is the audio features calculated for each song and made available via the official Spotify Web API.
This is an attempt to retrieve Spotify data collected after the previously extracted data. It has not been fully tested whether Spotify allowed any further full API requests after 2019.
**About**
Each song (row) has values for artist name, track name, track id and the audio features themselves (for more information about the audio features check out this doc from Spotify).
Additionally, there is also a popularity feature included in this dataset. Please note that Spotify recalculates this value based on the number of plays the track receives, so it might no longer be the correct value when you access the data.
**Key Questions/Hypotheses that can be Answered**
1. ARE SONGS IN MAJOR MODE MORE POPULAR THAN ONES IN MINOR?
2. ARE SONGS WITH HIGH LOUDNESS THE MOST POPULAR?
3. DO MOST PEOPLE LIKE LISTENING TO SONGS WITH SHORTER DURATION?
In addition, more detailed analysis can be done to see what makes a song popular.
**Credit**
Entire credit goes to Spotify for providing this data via their Web API.
https://developer.spotify.com/documentation/web-api/reference/tracks/get-track/
Kaggle dataset identifier: song-features-dataset-regressing-popularity
<jupyter_script># ## NOTEBOOK IMPORTS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go

import warnings

warnings.filterwarnings("ignore")

# # **This notebook is prepared for the Spotify Features EDA (Exploratory Data Analysis) project. The aim of the EDA is to visualize the following questions:**
# * ***1 - What are the top 5 songs in terms of popularity?***
# * ***2 - What are the top 5 songs with the highest loudness?***
# * ***3 - Which artist has the most danceable song?***
# * ***4 - What are the top 10 instrumental songs?***
# * ***5 - What is the instrumentalness level of songs with a popularity score higher than 70?***

# **In this notebook, the Python programming language and the relevant libraries are used to answer these questions about the songs in the dataset and to perform the visualizations. By following the usual data analysis steps, the results are presented as visual graphics. This gives a better understanding of the Spotify dataset and completes the EDA phase of the project.**

data = pd.read_csv(
    "/kaggle/input/song-features-dataset-regressing-popularity/SpotifySongPolularityAPIExtract.csv"
)  # load the data
df = data.copy()  # work on a copy of the dataset for convenience in later stages

df.sample(20)  # draw a random sample of 20 records from the dataset

df.info()  # general information about the dataset

df.describe()  # descriptive statistics of the dataset

# **To detect duplicate values in the 'artist_name' and 'track_name' columns, all string values are converted to lowercase and spaces are replaced with underscores ('_').
String comparison in Python is case-sensitive, so this normalization keeps the duplicate check from missing simple case or spacing variants.**

# convert the values to lowercase and transform them into snake_case format
df[["artist_name", "track_name"]] = df[["artist_name", "track_name"]].apply(
    lambda x: x.str.lower().str.replace(" ", "_")
)

# show the first 5 records of the dataset
df.head(5)

# **At this stage, the dataset has been transformed as desired. The 'track_id' column holds unique identifiers, so no changes are made to these values; they are used as the reference for detecting duplicate entries in the dataset.**

# count duplicated 'track_id' values
df.duplicated(subset="track_id").sum()

# identify the 337 duplicated entries
duplicated_songs = df[df.duplicated(subset=["artist_name", "track_id"], keep=False)]
duplicated_songs.sort_values("track_id")

# **As observed above, certain songs were entered multiple times in the dataset. These values need to be dropped.**

# drop the duplicated values from the dataset
df.drop_duplicates(subset=["artist_name", "track_id"], keep="first", inplace=True)

# after the drop, count the remaining duplicated values
df.duplicated(subset="track_id").sum()

# **There are still 79 duplicated track_ids left in the dataset. Since the rows duplicated under the same artist name were already dropped, these must be track_ids that appear under different artist names, i.e. probably incorrect entries. To validate this assumption, we need to look deeper into the data.**

# collect the 79 remaining duplicated entries
# (PWEs: presumably "possibly wrong entries"; .copy() avoids chained-assignment
# warnings on the operations below)
PWEs = df[df.duplicated(subset=["track_id"], keep=False)].copy()
PWEs.sort_values("track_id")

# drop the remaining duplicated values
PWEs.drop_duplicates(subset=["track_id", "track_name"], keep="first", inplace=True)

# after the drop, count the remaining duplicated values
PWEs.duplicated("track_id").sum()

# **There is still 1 duplicated value left.**

# identify the track_id of the remaining duplicate
PWEs[PWEs.duplicated("track_id")]

# retrieve that track_id from the dataset
df[df["track_id"] == "3Jj5Jho1NVrUXi9j6Nunf1"]

# **Here the difference is only an extra character in the track_name value; the two rows are clearly the same song. However, as seen earlier, there are entries with the same track names but different artist names, so it is safer to mark the artist names as unknown and drop the duplicated entries.**

PWEs.drop_duplicates(subset="track_id", keep="first", inplace=True)
PWEs.duplicated("track_id").sum()

# replace the potentially incorrect artist names with the '?' symbol
PWEs["artist_name"] = PWEs["artist_name"].replace(r".*", "?", regex=True)

# remove every row from df whose track_id was duplicated (they live on in PWEs)
df.drop_duplicates(subset="track_id", keep=False, inplace=True)
df[df["track_id"] == "02mqJ3mpw8MLDmM2cqIVB9"]

# concatenate the rows with unknown artist names back into the main dataset
filtered_df = pd.concat([df, PWEs])

filtered_df.sample(20)  # a random sample of 20 records after removing the duplicates

filtered_df.info()  # review the cleaned data with the info() method

filtered_df.duplicated(subset="track_id").sum()  # confirm no duplicated track_ids remain

filtered_df.describe()  # review the cleaned data once more

# ## What are the top 5 songs in terms of popularity?
sorted_df_by_popularity = filtered_df.sort_values(by="popularity", ascending=False)
# .iloc is used for positional access, which is robust after sorting
top_5_artists = [
    name.title().replace("_", " ")
    for name in sorted_df_by_popularity["artist_name"].iloc[0:5].to_list()
]
top_5_tracks = [
    name.title().replace("_", " ")
    for name in sorted_df_by_popularity["track_name"].iloc[0:5].to_list()
]

table = go.Figure(
    data=[
        go.Table(
            header=dict(
                values=["<b>Artist Name</b>", "<b>Track Name</b>", "<b>Popularity</b>"],
                fill_color="#7AA874",
                font=dict(color=["#000000", "#000000", "#000000"], size=15),
                align="center",
            ),
            cells=dict(
                values=[
                    top_5_artists,
                    top_5_tracks,
                    sorted_df_by_popularity["popularity"].iloc[0:5],
                ],
                fill_color="#F7DB6A",
                font=dict(color=["#000000", "#000000", "#000000"], size=11),
                align="center",
            ),
        )
    ]
)
table.update_layout(
    title={
        "text": "<b>The top 5 songs in terms of popularity</b>",
        "font": {"size": 28, "color": "#000000"},
    },
    width=1000,
    height=200,
    margin=dict(l=20, r=20, t=40, b=20),
)
table.show()

# ## What are the top 5 songs with the highest loudness?
sorted_df_by_loudness = filtered_df.sort_values(by="loudness", ascending=False)
top_5_artists = [
    name.title().replace("_", " ")
    for name in sorted_df_by_loudness["artist_name"].iloc[0:5].to_list()
]
top_5_tracks = [
    name.title().replace("_", " ")
    for name in sorted_df_by_loudness["track_name"].iloc[0:5].to_list()
]

table = go.Figure(
    data=[
        go.Table(
            header=dict(
                values=["<b>Artist Name</b>", "<b>Track Name</b>", "<b>Loudness</b>"],
                fill_color="#7AA874",
                font=dict(color=["#000000", "#000000", "#000000"], size=15),
                align="center",
            ),
            cells=dict(
                values=[
                    top_5_artists,
                    top_5_tracks,
                    sorted_df_by_loudness["loudness"].iloc[0:5],
                ],
                fill_color="#F7DB6A",
                font=dict(color=["#000000", "#000000", "#000000"], size=11),
                align="center",
            ),
        )
    ]
)
table.update_layout(
    title={
        "text": "<b>The top 5 songs with the highest loudness</b>",
        "font": {"size": 28, "color": "#000000"},
    },
    width=1000,
    height=200,
    margin=dict(l=20, r=20, t=40, b=20),
)
table.show()

# ## Which artist has the most danceable song?
sorted_df_by_danceability = filtered_df.sort_values(by="danceability", ascending=False)
# .iloc[0] takes the first row after sorting; plain [0] would look up the index
# label 0 instead of the top-ranked song
top_artist = sorted_df_by_danceability["artist_name"].iloc[0].title().replace("_", " ")
top_track = sorted_df_by_danceability["track_name"].iloc[0].title().replace("_", " ")

table = go.Figure(
    data=[
        go.Table(
            header=dict(
                values=["<b>Artist Name</b>", "<b>Track Name</b>", "<b>Danceability</b>"],
                fill_color="#7AA874",
                font=dict(color=["#000000", "#000000", "#000000"], size=15),
                align="center",
            ),
            cells=dict(
                values=[
                    top_artist,
                    top_track,
                    sorted_df_by_danceability["danceability"].iloc[0],
                ],
                fill_color="#F7DB6A",
                font=dict(color=["#000000", "#000000", "#000000"], size=11),
                align="center",
            ),
        )
    ]
)
table.update_layout(
    title={
        "text": "<b>The artist with the most danceable song</b>",
        "font": {"size": 28, "color": "#000000"},
    },
    width=1000,
    height=150,
    margin=dict(l=20, r=20, t=40, b=20),
)
table.show()

# ## What are the top 10 instrumental songs?
sorted_df_by_instrumentalness = filtered_df.sort_values(
    by="instrumentalness", ascending=False
)
top_10_artists = [
    name.title().replace("_", " ")
    for name in sorted_df_by_instrumentalness["artist_name"].iloc[0:10].to_list()
]
top_10_tracks = [
    name.title().replace("_", " ")
    for name in sorted_df_by_instrumentalness["track_name"].iloc[0:10].to_list()
]

table = go.Figure(
    data=[
        go.Table(
            header=dict(
                values=[
                    "<b>Artist Name</b>",
                    "<b>Track Name</b>",
                    "<b>Instrumentalness</b>",
                ],
                fill_color="#7AA874",
                font=dict(color=["#000000", "#000000", "#000000"], size=15),
                align="center",
            ),
            cells=dict(
                values=[
                    top_10_artists,
                    top_10_tracks,
                    sorted_df_by_instrumentalness["instrumentalness"].iloc[0:10],
                ],
                fill_color="#F7DB6A",
                font=dict(color=["#000000", "#000000", "#000000"], size=11),
                align="center",
            ),
        )
    ]
)
table.update_layout(
    title={
        "text": "<b>The top 10 instrumental songs</b>",
        "font": {"size": 28, "color": "#000000"},
    },
    width=1000,
    height=400,
    margin=dict(l=20, r=20, t=40, b=20),
)
table.show()

# ## What is the instrumentalness level of songs with a popularity score higher than 70?
filtered_df_70P = filtered_df[filtered_df["popularity"] > 70]
sorted_df_by_instrumentalness = filtered_df_70P.sort_values(
    by="instrumentalness", ascending=False
)
artists = [
    name.title().replace("_", " ")
    for name in sorted_df_by_instrumentalness["artist_name"].to_list()
]
tracks = [
    name.title().replace("_", " ")
    for name in sorted_df_by_instrumentalness["track_name"].to_list()
]

scatter_plot = go.Figure(
    data=go.Scatter(
        # x and y are taken from the same sorted frame so each marker's
        # popularity, instrumentalness and hover text line up row by row
        x=sorted_df_by_instrumentalness["popularity"],
        y=sorted_df_by_instrumentalness["instrumentalness"],
        mode="markers",
        marker=dict(size=10, color="#F7DB6A", line=dict(width=1, color="#000000")),
        text=[
            f"Artist Name: {artist}<br>Track Name: {track}"
            for artist, track in zip(artists, tracks)
        ],
        hovertemplate="<b>%{text}</b><br>"
        + "<b>Popularity</b>: %{x}<br>"
        + "<b>Instrumentalness</b>: %{y}<br>",
    )
)
scatter_plot.update_layout(
    title={
        "text": "<b>The instrumentalness level of popular songs</b>",
        "font": {"size": 28, "color": "#000000"},
    },
    xaxis=dict(title="<b>Popularity</b>"),
    yaxis=dict(title="<b>Instrumentalness</b>"),
    width=1000,
    height=500,
    margin=dict(l=20, r=20, t=40, b=20),
)
scatter_plot.show()
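# Closing sketch (assumed, not in the original notebook) touching one of the
# dataset card's hypotheses: are louder songs more popular? A quick correlation
# between the two columns already used above gives a first answer.
print(
    "loudness vs popularity correlation:",
    round(filtered_df["loudness"].corr(filtered_df["popularity"]), 3),
)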
false
1
3,688
3
4,044
3,688
129463180
# ! nvidia-smi
import numpy as np
import pandas as pd
import os
import shutil  # used by copy_images below
import glob  # imported as a module because glob.glob is called below
from matplotlib import pyplot as plt
import matplotlib.patches as patches
import matplotlib
import seaborn as sns
from dataclasses import dataclass, field, asdict
from pprint import pprint as pp
from typing import List
import altair as alt
import cv2 as cv
from PIL import Image
import imageio
from pathlib import Path
from typing import Tuple, Optional
import numba
from skimage import color, transform, exposure
from fastai.vision.all import *
from fastcore.all import *
from fastprogress import progress_bar

x = "/kaggle/input/understanding_cloud_organization"
path = Path(x)
print(path.ls())
os.listdir(x)
train_df = pd.read_csv(path / "train.csv")
train_fns = sorted(glob.glob(str(path) + "/train_images/*.jpg"))
test_fns = sorted(glob.glob(str(path) + "/test_images/*.jpg"))
print("{} rows in train.csv.".format(len(train_df)))
print("{} images in train set.".format(len(train_fns)))
print("{} images in test set.".format(len(test_fns)))
train_df.head()


def split_img_label(img_lbl):
    """Return image and label from file name like '0011165.jpg_Flower'"""
    s = img_lbl.split("_")
    assert len(s) == 2
    return s[0], s[1]


# split Image_Label
train_df["Image"] = train_df["Image_Label"].apply(
    lambda img_lbl: split_img_label(img_lbl)[0]
)
train_df["Label"] = train_df["Image_Label"].apply(
    lambda img_lbl: split_img_label(img_lbl)[1]
)
del train_df["Image_Label"]
train_df.head()
train_with_mask = train_df.dropna(subset=["EncodedPixels"])
ax = (
    train_with_mask["Label"]
    .value_counts()
    .plot(kind="pie", autopct="%1.1f%%", title="Share of each class", figsize=(10, 6))
)


# utility functions
def rle_decode(encoded: str, shape: Tuple[int, int], value: int = 1) -> np.ndarray:
    """Decodes an RLE-encoded string.

    Parameters
    ----------
    encoded
        RLE mask.
    shape
        Mask shape in (height, width) format.
    value
        Value to fill in the mask.

    Returns
    -------
    mask
        The decoded mask as 2D image of shape (height, width).

    """
    numbers = list(map(int, encoded.split()))
    starts, runs = [np.asarray(xs) for xs in (numbers[::2], numbers[1::2])]
    # pixel enumeration starts from 1 but arrays are indexed
    # starting from 0, so an adjustment is needed
    starts -= 1
    mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, run in zip(starts, runs):
        mask[start : start + run] = value
    # NumPy arrays are (height, width), but the mask pixels are enumerated
    # top to bottom, then left to right (column-major), so the flat mask is
    # reshaped as (width, height) and transposed
    return mask.reshape(shape[1], shape[0]).T


def rle_encode(mask: np.ndarray, threshold: Optional[float] = None) -> str:
    """Encodes a binary mask into an RLE string.

    References
    ----------
    [1] https://www.kaggle.com/hackerpoet/even-faster-run-length-encoder

    """
    pixels = mask.T.flatten()
    if threshold is not None:
        pixels = np.where(pixels > threshold, 1, 0)
    pixels = pixels.astype(np.uint8)
    pixels = np.concatenate([[0], pixels, [0]])
    mask_start = (pixels[:-1] == 0) & (pixels[1:] == 1)
    mask_end = (pixels[:-1] == 1) & (pixels[1:] == 0)
    [start_idx] = np.where(mask_start)
    [end_idx] = np.where(mask_end)
    lengths = end_idx - start_idx
    encoded = np.zeros(start_idx.shape[0] + lengths.shape[0])
    # start_idx is already the 0-based index into the unpadded array, so +1
    # converts it to the 1-based RLE convention that rle_decode expects
    # (the original +2 was off by one and shifted every run by a pixel)
    encoded[::2] = start_idx + 1
    encoded[1::2] = lengths
    return " ".join([str(x) for x in encoded.astype(int)])


@numba.njit()
def rle_numba(pixels):
    size = len(pixels)
    points = []
    if pixels[0] == 1:
        points.append(0 + 1)
    flag = True
    for i in range(1, size):
        if pixels[i] != pixels[i - 1]:
            if flag:
                points.append(i + 1)
                flag = False
            else:
                points.append(i + 1 - points[-1])
                flag = True
    if pixels[-1] == 1:
        points.append(size - points[-1] + 1)
    return points


def rle_numba_encode(image):
    pixels = image.flatten(order="F")
    points = rle_numba(pixels)
    return " ".join(str(x) for x in points)


def on_kaggle() -> bool:
    # os.environ.get returns a string when the variable is set; wrap in
    # bool() so the annotated return type is honest
    return bool(os.environ.get("KAGGLE_URL_BASE", False))


def get_dataset_size(debug: bool = False):
    n_total = len(get_image_files(DATA_ROOT))
    return n_total // 10 if debug else n_total


def get_combined_code(*class_names):
    assert 1 <= len(class_names) <= 4
    combined_code = 0
    for class_name in class_names:
        combined_code |= MASK_CODES[class_name]
    label = "+".join(class_names)
    return label, combined_code


def equalize(data: np.ndarray, adaptive: bool) -> np.ndarray:
    """Histogram equalization to normalize images before previewing."""
    data = data - np.min(data)
    data = data / np.max(data)
    data = (data * 255).astype(np.uint8)
    method = exposure.equalize_adapthist if adaptive else exposure.equalize_hist
    return method(data)


def predict(learn, test_ids, metadata, n_sample=None):
    if n_sample is not None:
        test_ids = np.random.choice(test_ids, size=n_sample, replace=False)
    with learn.no_bar():
        predicted = []
        for test_id in progress_bar(test_ids):
            mask, *_ = learn.predict(metadata[test_id].full_path)
            predicted.append(mask.numpy())
    return predicted, test_ids


def view_results(
    test_ids,
    predictions,
    metadata,
    figsize_mult=4,
    n_rows=5,
    n_cols=4,
    overlay_resize=(128, 128),
):
    f, axes = plt.subplots(
        n_rows, n_cols, figsize=(n_rows * figsize_mult, n_cols * figsize_mult)
    )
    for test_id, mask, ax in zip(test_ids, predictions, axes.flat):
        fn = metadata[test_id].full_path
        img = np.asarray(Image.open(fn))
        img = equalize(img, True)
        img = transform.resize(img, mask.shape)
        img = np.amax(img) - img
        overlay = color.label2rgb(mask, img, kind="overlay", alpha=0.5)
        overlay = transform.resize(overlay, overlay_resize)
        ax.imshow(overlay)
        ax.set_axis_off()
    return f


if on_kaggle():
    DATA_ROOT = Path("/kaggle/input/understanding_cloud_organization")
    OUTPUT_DIR = Path("/kaggle/working")
else:
    # local setup
    DATA_ROOT = Path("/mnt/fast/data/uco")
    OUTPUT_DIR = Path("/mnt/fast/data/uco_prepared")
TRAIN_CSV = DATA_ROOT / "train.csv"
UNIQUE_CLASSES = ["Fish", "Flower", "Gravel", "Sugar"]
MASK_CODES = dict(zip(UNIQUE_CLASSES, (0b001, 0b010, 0b100, 0b1000)))
COMBINED_MASK_CODES = [
    ("none", 0),
    *list(MASK_CODES.items()),
    get_combined_code("Fish", "Flower"),
    get_combined_code("Fish", "Gravel"),
    get_combined_code("Fish", "Sugar"),
    get_combined_code("Flower", "Gravel"),
    get_combined_code("Flower", "Sugar"),
    get_combined_code("Gravel", "Sugar"),
    get_combined_code("Flower", "Gravel", "Sugar"),
    get_combined_code("Fish", "Gravel", "Sugar"),
    get_combined_code("Fish", "Flower", "Sugar"),
    get_combined_code("Fish", "Flower", "Gravel"),
    get_combined_code("Fish", "Flower", "Gravel", "Sugar"),
]


def get_masks_rle(img):
    """Get RLE-encoded masks for this image.

    Note: this expects a wide-format frame indexed by file name with one
    column per class; it is unused with the long-format train_df above.
    """
    img = str(img).split("/")[-1]  # get filename only
    return train_df.loc[img, UNIQUE_CLASSES].to_list()


MASK_CODES, COMBINED_MASK_CODES


def rle_to_mask(rle_string, width, height):
    """
    convert RLE (run length encoding) string to numpy array

    Parameters:
    rle_string (str): string of rle encoded mask
    height (int): height of the mask
    width (int): width of the mask

    Returns:
    numpy.array: numpy array of the mask
    """
    rows, cols = height, width
    # the missing-mask sentinel is the string "-1" (set via fillna above)
    if rle_string == "-1":
        return np.zeros((height, width))
    else:
        rle_numbers = [int(num_string) for num_string in rle_string.split(" ")]
        rle_pairs = np.array(rle_numbers).reshape(-1, 2)
        img = np.zeros(rows * cols, dtype=np.uint8)
        for index, length in rle_pairs:
            index -= 1
            img[index : index + length] = 255
        img = img.reshape(cols, rows)
        img = img.T
        return img


def get_mask(line_id, shape=(2100, 1400)):
    """
    Function to visualize the image and the mask.

    INPUT:
        line_id - id of the line to visualize the masks
        shape - image shape as (width, height)

    RETURNS:
        np_mask - numpy segmentation map
    """
    # replace null values with '-1'
    im_df = train_df.fillna("-1")
    # convert rle to mask
    rle = im_df.loc[line_id]["EncodedPixels"]
    if rle != "-1":
        np_mask = rle_to_mask(rle, shape[0], shape[1])
        np_mask = np.clip(np_mask, 0, 1)
    else:
        # empty mask; (height, width) so both branches return the same shape
        np_mask = np.zeros((shape[1], shape[0]), dtype=np.uint8)
    return np_mask


def get_mask_image(image_name, shape=(2100, 1400)):
    im_df = train_df.fillna("-1")
    # an image can have up to four label rows; merge their masks into one.
    # iterating avoids comparing a whole Series against "-1", which is
    # ambiguous and raised an error in the original version
    np_mask = np.zeros((shape[1], shape[0]), dtype=np.uint8)
    for rle in im_df.query("Image == @image_name")["EncodedPixels"]:
        if rle != "-1":
            np_mask |= np.clip(rle_to_mask(rle, shape[0], shape[1]), 0, 1).astype(
                np.uint8
            )
    return TensorImage(np_mask)


# helper function to get segmentation mask for an image by filename
def get_mask_by_image_id(image_id, label):
    """
    Get the segmentation map for a given image file and class label.

    INPUT:
        image_id - filename of the image
        label - class label ('Fish', 'Flower', 'Gravel' or 'Sugar')

    RETURNS:
        np_mask - numpy segmentation map
    """
    im_df = train_df[train_df["Image"] == image_id.split("/")[-1]].fillna("-1")
    image = np.asarray(Image.open(image_id))
    rle = im_df[im_df["Label"] == label]["EncodedPixels"].values[0]
    if rle != "-1":
        np_mask = rle_to_mask(
            rle, np.asarray(image).shape[1], np.asarray(image).shape[0]
        )
        np_mask = np.clip(np_mask, 0, 1)
    else:
        # empty mask
        np_mask = np.zeros(
            (np.asarray(image).shape[0], np.asarray(image).shape[1]), dtype=np.uint8
        )
    return np_mask


set_seed(1, reproducible=True)
DEBUG = True
DATASET_SIZE = (
    3000 if DEBUG else 20000
)  # read only a small subset of data to train some basic model
DATA_ROOT = path
fnames = get_image_files(path)
fnames[0].parts


def get_slice_id(fname):
    # parts[5] is the file-name component of
    # /kaggle/input/understanding_cloud_organization/<split>/<file>.jpg
    return fname.parts[5]


get_slice_id(fnames[0])
train_df.sample(5)
MASK_CODES = dict(zip(UNIQUE_CLASSES, (0b001, 0b010, 0b100, 0b1000)))
MASK_CODES


# first draft of the DataBlock getters; superseded by the disk-based
# versions defined further below
def get_items():
    return fnames


def get_y(fname):
    return get_mask_image(fname.parts[5])


empty_masks = set()
masks_dir = OUTPUT_DIR / "masks"
if not masks_dir.exists():
    masks_dir.mkdir()
for sample_id, df_group in progress_bar(train_df.groupby("Image")):
    shape = (1400, 2100)
    decoded_mask = np.zeros(shape, dtype=np.uint8)
    for _, row in df_group.iterrows():
        rle_mask = row.EncodedPixels
        if isinstance(rle_mask, str):
            decoded_mask |= rle_decode(rle_mask, shape, value=MASK_CODES[row["Label"]])
    if not decoded_mask.any():
        empty_masks.add(sample_id)
    # decoded_mask is already (height, width); transposing it here (as the
    # original did) would save rotated masks that no longer line up with
    # the 2100x1400 images
    mask_image = Image.fromarray(decoded_mask)
    mask_path = masks_dir / f"{sample_id}.png"
    mask_image.save(mask_path)
images_dir = OUTPUT_DIR / "images"
if not images_dir.exists():
    images_dir.mkdir()


def copy_images(source_folder, destination_folder):
    # Create the destination folder if it doesn't exist
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)
    # Get the list of files in the source folder
    files = os.listdir(source_folder)
    # Copy each image file to the destination folder
    for file_name in files:
        source_path = os.path.join(source_folder, file_name)
        if os.path.isfile(source_path) and file_name.lower().endswith(
            (".jpg", ".jpeg", ".png", ".gif")
        ):
            destination_path = os.path.join(destination_folder, file_name)
            shutil.copy2(source_path, destination_path)


# Usage example
source_folder = x + "/train_images"
destination_folder = images_dir
copy_images(source_folder, destination_folder)
os.listdir(str(OUTPUT_DIR) + "/masks")


def get_items(source_dir: Path):
    return get_image_files(source_dir.joinpath("images"))


def get_y(fn: Path):
    # masks were saved as "<name>.jpg.png", so rebuild that name from the stem
    return fn.parent.parent.joinpath("masks").joinpath(f"{fn.stem}.jpg.png")


seg = DataBlock(
    blocks=(ImageBlock, MaskBlock(COMBINED_MASK_CODES)),
    get_items=get_items,
    get_y=get_y,
    splitter=RandomSplitter(),
    item_tfms=[Resize(192, method="squash")],
)
dls = seg.dataloaders(OUTPUT_DIR, bs=32)
learn = unet_learner(dls, resnet18, metrics=DiceMulti)
learn.fine_tune(3)
interp = SegmentationInterpretation.from_learner(learn)
interp.plot_top_losses(4, largest=True)
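# A quick round-trip test of the RLE helpers defined above is a cheap way to
# catch off-by-one indexing mistakes before they silently shift every saved
# mask. This check is an editorial sketch, assuming `rle_encode` and
# `rle_decode` exactly as defined in this notebook.
rng = np.random.default_rng(42)
toy_mask = (rng.random((8, 12)) > 0.7).astype(np.uint8)  # small random binary mask
restored = rle_decode(rle_encode(toy_mask), toy_mask.shape)
assert (restored == toy_mask).all(), "RLE round-trip mismatch"
print("RLE round-trip OK")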
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/463/129463180.ipynb
null
null
[{"Id": 129463180, "ScriptId": 38371024, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8973955, "CreationDate": "05/14/2023 03:47:25", "VersionNumber": 2.0, "Title": "notebook57e28fc572", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 454.0, "LinesInsertedFromPrevious": 143.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 311.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,079
0
4,079
4,079
129463197
<jupyter_start><jupyter_text>magic04
Kaggle dataset identifier: magic04
<jupyter_script># Abstract: data are MC generated to simulate registration of high energy gamma particles in an atmospheric Cherenkov telescope
# The data are MC generated to simulate the registration of high energy gamma particles in a ground-based atmospheric Cherenkov gamma telescope using the imaging technique. A Cherenkov gamma telescope observes high energy gamma rays, taking advantage of the radiation emitted by charged particles produced inside the electromagnetic showers initiated by the gammas and developing in the atmosphere. This Cherenkov radiation (of visible to UV wavelengths) leaks through the atmosphere and gets recorded in the detector, allowing reconstruction of the shower parameters. The available information consists of pulses left by the incoming Cherenkov photons on the photomultiplier tubes, arranged in a plane, the camera. Depending on the energy of the primary gamma, a total of a few hundred to some 10000 Cherenkov photons get collected, in patterns (called the shower image) that allow one to discriminate statistically those caused by primary gammas (signal) from the images of hadronic showers initiated by cosmic rays in the upper atmosphere (background).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

cols = [
    "fLength",
    "fWidth",
    "fSize",
    "fConc",
    "fConc1",
    "fAsym",
    "fM3Long",
    "fM3Trans",
    "fAlpha",
    "fDist",
    "class",
]
df = pd.read_csv("/kaggle/input/magic04/magic04.data", names=cols)
df.head()
df.isnull().sum()
# the last column, "class", is a string, so we convert it to integers with label encoding
from sklearn import preprocessing

le = preprocessing.LabelEncoder()
cols1 = ["class"]
for col in cols1:
    df[col] = le.fit_transform(df[col])
df.head()
import seaborn as sns

sns.histplot(df["fAsym"])
sns.histplot(df["fDist"])
sns.histplot(df["fWidth"])
sns.histplot(df["fSize"])
sns.countplot(data=df, x="class")
for label in cols[:-1]:
    plt.hist(
        df[df["class"] == 0][label],
        color="blue",
        label="Gamma",
        alpha=0.7,
        density=True,
    )
    plt.hist(
        df[df["class"] == 1][label],
        color="red",
        label="Hadron",
        alpha=0.7,
        density=True,
    )
    plt.title(label)
    plt.xlabel(label)
    plt.ylabel("Probability")
    plt.legend()
    plt.show()
corr = df.corr()
plt.figure(figsize=(15, 9))
sns.heatmap(corr, cmap="coolwarm", annot=True)
# Train / validation / test split at the 60% and 80% marks (60/20/20)
train, valid, test = np.split(
    df.sample(frac=1), [int(0.6 * len(df)), int(0.8 * len(df))]
)
# np.split above cuts the shuffled data at 60% and 80%: the first 60% is
# train, 60-80% is validation, and the rest is test.
# The feature scales vary widely, so we standardize them; unscaled features
# can hurt distance- and gradient-based models.
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler


def scale_data(dataframe, oversample=False):
    x = dataframe[dataframe.columns[:-1]].values
    y = dataframe[dataframe.columns[-1]].values
    scaler = StandardScaler()
    x = scaler.fit_transform(x)
    if oversample:
        rs = RandomOverSampler()
        x, y = rs.fit_resample(x, y)
    data = np.hstack((x, np.reshape(y, (-1, 1))))
    return data, x, y


# class counts before RandomOverSampler
print(len(train))
print(len(train[train["class"] == 0]))  # gamma
print(len(train[train["class"] == 1]))  # hadron
# there are more gamma samples than hadron samples; we oversample the hadron
# class to match the gamma class so the model trains on balanced data, which
# is exactly what RandomOverSampler does
train, xtrain, ytrain = scale_data(train, oversample=True)
valid, xvalid, yvalid = scale_data(valid, oversample=False)
test, xtest, ytest = scale_data(test, oversample=False)
print(sum(ytrain == 0))
print(sum(ytrain == 1))
# ### *Model:* KNN
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report

knn_model = KNeighborsClassifier(n_neighbors=10)
knn_model.fit(xtrain, ytrain)
ypred = knn_model.predict(xtest)
print(classification_report(ytest, ypred))
# ### *Model:* Naive Bayes
from sklearn.naive_bayes import GaussianNB

gnb_model = GaussianNB()
gnb_model.fit(xtrain, ytrain)
ypred = gnb_model.predict(xtest)
print(classification_report(ytest, ypred))
# ### *Model:* Logistic Regression
from sklearn.linear_model import LogisticRegression

lg_model = LogisticRegression()
lg_model.fit(xtrain, ytrain)
ypred = lg_model.predict(xtest)
print(classification_report(ytest, ypred))
# ### *Model:* Support Vector Machine
from sklearn.svm import SVC

svc_model = SVC()
svc_model.fit(xtrain, ytrain)
ypred = svc_model.predict(xtest)
print(classification_report(ytest, ypred))
# ### *Model:* Neural Net
import tensorflow as tf


def plot_history(history):
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.plot(history.history["loss"], label="loss")
    # ax1.plot(history.history['val_loss'], label='val_loss')
    ax1.set_xlabel("Epoch")
    ax1.set_ylabel("Binary crossentropy")
    ax1.grid(True)
    ax2.plot(history.history["accuracy"], label="accuracy")
    # ax2.plot(history.history['val_accuracy'], label='val_accuracy')
    ax2.set_xlabel("Epoch")
    ax2.set_ylabel("Accuracy")
    ax2.grid(True)
    plt.show()


def train_model(X_train, y_train, num_nodes, dropout_prob, lr, batch_size, epochs):
    nn_model = tf.keras.Sequential(
        [
            tf.keras.layers.Dense(num_nodes, activation="relu", input_shape=(10,)),
            tf.keras.layers.Dropout(dropout_prob),
            tf.keras.layers.Dense(num_nodes, activation="relu"),
            tf.keras.layers.Dropout(dropout_prob),
            tf.keras.layers.Dense(1, activation="sigmoid"),
        ]
    )
    nn_model.compile(
        optimizer=tf.keras.optimizers.Adam(lr),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    history = nn_model.fit(
        X_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        verbose=0,
    )
    return nn_model, history


# Much of the neural-net work is a grid search over hyper-parameters;
# we keep the model with the lowest validation loss
least_val_loss = float("inf")
least_loss_model = None
epochs = 100
for num_nodes in [16, 32, 64]:
    for dropout_prob in [0, 0.2]:
        for lr in [0.01, 0.005, 0.001]:
            for batch_size in [32, 64, 128]:
                print(
                    f"{num_nodes} nodes, dropout {dropout_prob}, lr {lr}, batch size {batch_size}"
                )
                model, history = train_model(
                    xtrain, ytrain, num_nodes, dropout_prob, lr, batch_size, epochs
                )
                plot_history(history)
                val_loss = model.evaluate(xvalid, yvalid)[0]
                if val_loss < least_val_loss:
                    least_val_loss = val_loss
                    least_loss_model = model
ypred = least_loss_model.predict(xtest)
ypred = (ypred > 0.5).astype(int)
print(classification_report(ytest, ypred))
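# Beyond the classification report, a confusion matrix makes the gamma/hadron
# trade-off of the selected network explicit. This cell is an editorial
# sketch, assuming `ytest` and the thresholded `ypred` from the cell above
# are still in scope (label 0 = gamma, 1 = hadron, as encoded earlier).
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# ravel() flattens the (n, 1) Keras output into the 1-D vector sklearn expects
cm = confusion_matrix(ytest, ypred.ravel())
ConfusionMatrixDisplay(cm, display_labels=["gamma", "hadron"]).plot(cmap="Blues")
plt.show()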
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/463/129463197.ipynb
magic04
kiyoshi732
[{"Id": 129463197, "ScriptId": 38494774, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8894341, "CreationDate": "05/14/2023 03:47:38", "VersionNumber": 1.0, "Title": "MAGIC Gamma Telescope", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185542312, "KernelVersionId": 129463197, "SourceDatasetVersionId": 5679957}]
[{"Id": 5679957, "DatasetId": 3265250, "DatasourceVersionId": 5755510, "CreatorUserId": 8894341, "LicenseName": "Unknown", "CreationDate": "05/14/2023 03:45:46", "VersionNumber": 1.0, "Title": "magic04", "Slug": "magic04", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3265250, "CreatorUserId": 8894341, "OwnerUserId": 8894341.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5679957.0, "CurrentDatasourceVersionId": 5755510.0, "ForumId": 3330856, "Type": 2, "CreationDate": "05/14/2023 03:45:46", "LastActivityDate": "05/14/2023", "TotalViews": 77, "TotalDownloads": 7, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 8894341, "UserName": "kiyoshi732", "DisplayName": "kiyoshi732", "RegisterDate": "11/15/2021", "PerformanceTier": 0}]
false
0
2,113
0
2,132
2,113